5 #ifndef _RTE_EVENT_ETH_TX_ADAPTER_ 6 #define _RTE_EVENT_ETH_TX_ADAPTER_ 268 pkt->hash.txadapter.txq = queue;
284 return pkt->hash.txadapter.txq;
303 #define RTE_EVENT_ETH_TX_ADAPTER_ENQUEUE_SAME_DEST 0x1 351 static inline uint16_t
358 const struct rte_event_fp_ops *fp_ops;
361 fp_ops = &rte_event_fp_ops[dev_id];
362 port = fp_ops->data[port_id];
363 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG 364 if (dev_id >= RTE_EVENT_MAX_DEVS ||
365 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
375 rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
378 return fp_ops->txa_enqueue_same_dest(port, ev, nb_events);
380 return fp_ops->txa_enqueue(port, ev, nb_events);
/* NOTE(review): the lines below appear to be a doxygen-rendered API
 * listing (prototypes without trailing semicolons), not compilable C;
 * they are kept verbatim. One-line summaries are inferred from the
 * identifiers — confirm against the DPDK eventdev Tx adapter docs.
 */
#define __rte_always_inline
/** Start the Tx adapter identified by @p id. */
int rte_event_eth_tx_adapter_start(uint8_t id)
/** Fast-path enqueue of @p nb_events events via event port @p port_id. */
static uint16_t rte_event_eth_tx_adapter_enqueue(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], uint16_t nb_events, const uint8_t flags)
/** Remove Tx queue @p queue of @p eth_dev_id from adapter @p id (negative @p queue presumably means all queues — verify). */
int rte_event_eth_tx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id, int32_t queue)
/** Stop the Tx adapter identified by @p id. */
int rte_event_eth_tx_adapter_stop(uint8_t id)
/** Add Tx queue @p queue of @p eth_dev_id to adapter @p id. */
int rte_event_eth_tx_adapter_queue_add(uint8_t id, uint16_t eth_dev_id, int32_t queue)
/** Store @p queue in the mbuf so the adapter transmits it on that Tx queue. */
static __rte_always_inline void rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
/** Retrieve the service id of adapter @p id into @p service_id. */
int rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
/** Read back the Tx queue stored in the mbuf by txq_set(). */
static __rte_always_inline uint16_t rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
/** Reset the statistics of adapter @p id. */
int rte_event_eth_tx_adapter_stats_reset(uint8_t id)
/** Retrieve the statistics of adapter @p id into @p stats. */
int rte_event_eth_tx_adapter_stats_get(uint8_t id, struct rte_event_eth_tx_adapter_stats *stats)
/** Create adapter @p id on event device @p dev_id using @p port_config. */
int rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config)
/** Callback type used by create_ext() to supply the adapter configuration. */
int(* rte_event_eth_tx_adapter_conf_cb)(uint8_t id, uint8_t dev_id, struct rte_event_eth_tx_adapter_conf *conf, void *arg)
/** Free adapter @p id and its resources. */
int rte_event_eth_tx_adapter_free(uint8_t id)
/** Create adapter @p id using configuration callback @p conf_cb. */
int rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_event_eth_tx_adapter_conf_cb conf_cb, void *conf_arg)
/** Retrieve the event port of adapter @p id into @p event_port_id. */
int rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)