#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <memory.h>

#include <rte_log.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "main.h"
#include "cfg_file.h"
uint32_t app_numa_mask = 0;
static uint32_t app_inited_port_mask = 0;
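/* Per-subport mapping of pipe index to pipe profile; entries left at -1 are
 * skipped when the pipes are configured in app_init_sched_port(). */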
int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];
#define MAX_NAME_LEN 32
struct ring_conf ring_conf = {
.rx_size = APP_RX_DESC_DEFAULT,
.ring_size = APP_RING_SIZE,
.tx_size = APP_TX_DESC_DEFAULT,
};
struct burst_conf burst_conf = {
.rx_burst = MAX_PKT_RX_BURST,
.ring_burst = PKT_ENQUEUE,
.qos_dequeue = PKT_DEQUEUE,
.tx_burst = MAX_PKT_TX_BURST,
};
struct ring_thresh rx_thresh = {
.pthresh = RX_PTHRESH,
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
};
struct ring_thresh tx_thresh = {
.pthresh = TX_PTHRESH,
.hthresh = TX_HTHRESH,
.wthresh = TX_WTHRESH,
};
uint32_t nb_pfc;
const char *cfg_profile = NULL;
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];
/* default Ethernet device configuration */
static struct rte_eth_conf port_conf = {
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
};

static int
app_init_port(uint16_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf local_port_conf = port_conf;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;
	uint16_t rx_size;
	uint16_t tx_size;
	char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];

	memset(&rx_conf, 0, sizeof(rx_conf));
	memset(&tx_conf, 0, sizeof(tx_conf));
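	/* Skip ports that have already been initialized: a port may be shared
	 * by several flow configurations. */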
if (app_inited_port_mask & (1u << portid))
return 0;
rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
rx_conf.rx_free_thresh = 32;
rx_conf.rx_drop_en = 0;
rx_conf.rx_deferred_start = 0;
tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
tx_conf.tx_free_thresh = 0;
tx_conf.tx_rs_thresh = 0;
tx_conf.tx_deferred_start = 0;
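	/* Bring up the NIC port: configure it, set up one RX and one TX queue,
	 * then start it and report the link status. */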
RTE_LOG(INFO, APP,
"Initializing port %"PRIu16
"... ", portid);
fflush(stdout);
	ret = rte_eth_dev_info_get(portid, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-ret));
	/* enable fast mbuf free on TX when the device supports it */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	/* one RX queue and one TX queue per port */
	ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot configure device: err=%d, port=%u\n",
			ret, portid);
rx_size = ring_conf.rx_size;
tx_size = ring_conf.tx_size;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
			ret, portid);
ring_conf.rx_size = rx_size;
ring_conf.tx_size = tx_size;
	/* init one RX queue */
	fflush(stdout);
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_rx_queue_setup: err=%d, port=%u\n",
			ret, portid);
	/* init one TX queue */
	fflush(stdout);
	ret = rte_eth_tx_queue_setup(portid, 0, (uint16_t)ring_conf.tx_size,
		rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
			ret, portid, 0);
	/* start the Ethernet device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_start: err=%d, port=%u\n",
			ret, portid);
printf("done: ");
	ret = rte_eth_link_get(portid, &link);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_link_get: err=%d, port=%u: %s\n",
			ret, portid, rte_strerror(-ret));
	rte_eth_link_to_str(link_status_text, sizeof(link_status_text), &link);
	printf("%s\n", link_status_text);
	ret = rte_eth_promiscuous_enable(portid);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_promiscuous_enable: err=%s, port=%u\n",
			rte_strerror(-ret), portid);

	/* mark this port as initialized */
	app_inited_port_mask |= 1u << portid;
return 0;
}
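/* Default pipe profile: a 305175 byte/s token bucket (~2.44 Mbit/s) with the
 * same rate for each of the 13 traffic classes. */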
static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
{
.tb_rate = 305175,
.tb_size = 1000000,
.tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
305175, 305175, 305175, 305175, 305175, 305175, 305175},
.tc_period = 40,
.tc_ov_weight = 1,
.wrr_weights = {1, 1, 1, 1},
},
};
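/* Default subport profile: a 1250000000 byte/s token bucket (10 Gbit/s)
 * shared across all traffic classes. */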
static struct rte_sched_subport_profile_params
subport_profile[MAX_SCHED_SUBPORT_PROFILES] = {
{
.tb_rate = 1250000000,
.tb_size = 1000000,
.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
1250000000, 1250000000, 1250000000, 1250000000},
.tc_period = 10,
},
};
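/* Each subport enables 4096 pipes and uses 64-packet queues for every
 * traffic class; congestion management (cman) is not configured. */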
struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
{
.n_pipes_per_subport_enabled = 4096,
.qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
.pipe_profiles = pipe_profiles,
.n_pipe_profiles = sizeof(pipe_profiles) /
sizeof(struct rte_sched_pipe_params),
.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
.cman_params = NULL,
},
};
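/* Scheduler port parameters. The name, socket and rate values below are
 * placeholders that are overwritten per port in app_init_sched_port();
 * the MTU accounts for an Ethernet header with two VLAN tags plus a
 * 1500 byte payload. */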
static struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
.socket = 0,
.rate = 0,
.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
.n_subport_profiles = 1,
.subport_profiles = subport_profile,
.n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES,
.n_pipes_per_subport = MAX_SCHED_PIPES,
};
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from the global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	uint32_t pipe_count;
	int err;
	err = rte_eth_link_get(portid, &link);
	if (err < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_link_get: err=%d, port=%u: %s\n",
			err, portid, rte_strerror(-err));
	port_params.socket = socketid;
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
if (port == NULL){
rte_exit(EXIT_FAILURE,
"Unable to config sched port\n");
}
	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport,
				&subport_params[subport],
				0);
if (err) {
rte_exit(EXIT_FAILURE,
"Unable to config sched "
"subport %u, err=%d\n", subport, err);
}
uint32_t n_pipes_per_subport =
subport_params[subport].n_pipes_per_subport_enabled;
pipe_count = 0;
for (pipe = 0; pipe < n_pipes_per_subport; pipe++) {
if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
if (err) {
rte_exit(EXIT_FAILURE,
"Unable to config sched pipe %u "
"for profile %d, err=%d\n", pipe,
app_pipe_to_profile[subport][pipe], err);
}
pipe_count++;
}
}
if (pipe_count == 0)
rte_exit(EXIT_FAILURE,
"Error: invalid config, no pipes enabled for sched subport %u\n",
subport);
}
return port;
}
static int
app_load_cfg_profile(const char *profile)
{
int ret = 0;
if (profile == NULL)
return 0;
	struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
	if (file == NULL)
		rte_exit(EXIT_FAILURE,
			"Cannot load configuration profile %s\n", profile);
ret = cfg_load_port(file, &port_params);
if (ret)
goto _app_load_cfg_profile_error_return;
ret = cfg_load_subport(file, subport_params);
if (ret)
goto _app_load_cfg_profile_error_return;
ret = cfg_load_subport_profile(file, subport_profile);
if (ret)
goto _app_load_cfg_profile_error_return;
ret = cfg_load_pipe(file, pipe_profiles);
if (ret)
goto _app_load_cfg_profile_error_return;
_app_load_cfg_profile_error_return:
	rte_cfgfile_close(file);

	return ret;
}
int app_init(void)
{
uint32_t i;
char ring_name[MAX_NAME_LEN];
char pool_name[MAX_NAME_LEN];
int ret;
	/* there must be at least one Ethernet port available */
	if (rte_eth_dev_count_avail() == 0)
		rte_exit(EXIT_FAILURE,
			"No Ethernet port - bye\n");
if (app_load_cfg_profile(cfg_profile) != 0)
rte_exit(EXIT_FAILURE,
"Invalid configuration profile\n");
	/* initialize each active packet flow configuration */
	for (i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;
		struct rte_eth_link link;
		int retry_count = 100, retry_delay = 100; /* retry every 100 ms, up to 10 s */
snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
					socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;
snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
					socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;
snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		/* create the mbuf pool for this flow's RX port */
		qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
				mp_size, burst_conf.rx_burst * 4, 0,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				rte_eth_dev_socket_id(qos_conf[i].rx_port));
if (qos_conf[i].mbuf_pool == NULL)
rte_exit(EXIT_FAILURE,
"Cannot init mbuf pool for socket %u\n", i);
app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
		/* wait for the TX port link to come up before configuring the scheduler */
		memset(&link, 0, sizeof(link));
		ret = rte_eth_link_get(qos_conf[i].tx_port, &link);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_link_get: err=%d, port=%u: %s\n",
				ret, qos_conf[i].tx_port, rte_strerror(-ret));
		if (link.link_status == 0)
			printf("Waiting for link on port %u\n", qos_conf[i].tx_port);
		while (link.link_status == 0 && retry_count--) {
			rte_delay_ms(retry_delay);
			ret = rte_eth_link_get(qos_conf[i].tx_port, &link);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_link_get: err=%d, port=%u: %s\n",
					ret, qos_conf[i].tx_port, rte_strerror(-ret));
		}
qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
}
	RTE_LOG(INFO, APP,
		"time stamp clock running at %" PRIu64 " Hz\n",
		rte_get_timer_hz());
RTE_LOG(INFO, APP,
"Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
"NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
ring_conf.tx_size);
RTE_LOG(INFO, APP,
"Burst sizes: RX read = %hu, RX write = %hu,\n"
" Worker read/QoS enqueue = %hu,\n"
" QoS dequeue = %hu, Worker write = %hu\n",
burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
burst_conf.qos_dequeue, burst_conf.tx_burst);
RTE_LOG(INFO, APP,
"NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
"TX (p = %hhu, h = %hhu, w = %hhu)\n",
rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
return 0;
}