DPDK 20.08.0
rte_event_eth_tx_adapter.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _RTE_EVENT_ETH_TX_ADAPTER_
#define _RTE_EVENT_ETH_TX_ADAPTER_

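/*
 * The Tx adapter transmits the mbufs carried in events to ethdev Tx queues.
 * A minimal setup sketch, assuming `dev_id` (a configured event device),
 * `eth_port` (a started ethernet port) and adapter id 0 as placeholder names:
 *
 *	struct rte_event_port_conf pconf;
 *	const uint8_t id = 0;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	rte_event_eth_tx_adapter_create(id, dev_id, &pconf);
 *	rte_event_eth_tx_adapter_queue_add(id, eth_port, -1); // -1: all Tx queues
 *	rte_event_eth_tx_adapter_start(id);
 */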
#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>

#include <rte_mbuf.h>

#include "rte_eventdev.h"

struct rte_event_eth_tx_adapter_conf {
	uint8_t event_port_id;
	/**< Event port used by the adapter to dequeue mbuf events */
	uint32_t max_nb_tx;
	/**< Upper bound on mbufs processed in one adapter service call */
};

typedef int (*rte_event_eth_tx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
				struct rte_event_eth_tx_adapter_conf *conf,
				void *arg);

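/*
 * A possible shape for the callback above, for use with
 * rte_event_eth_tx_adapter_create_ext(); passing a pre-configured event
 * port through `arg` is an assumption for illustration, not something this
 * header mandates:
 *
 *	static int
 *	txa_conf_cb(uint8_t id, uint8_t dev_id,
 *		    struct rte_event_eth_tx_adapter_conf *conf, void *arg)
 *	{
 *		uint8_t txa_port = *(uint8_t *)arg; // event port reserved by the app
 *
 *		RTE_SET_USED(id);
 *		RTE_SET_USED(dev_id);
 *		conf->event_port_id = txa_port;
 *		conf->max_nb_tx = 128; // assumed batch limit
 *		return 0;
 *	}
 */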
struct rte_event_eth_tx_adapter_stats {
	uint64_t tx_retry;	/**< Number of transmit retries */
	uint64_t tx_packets;	/**< Number of packets transmitted */
	uint64_t tx_dropped;	/**< Number of packets dropped */
};

int
rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config);

int
rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_tx_adapter_conf_cb conf_cb,
				void *conf_arg);

int
rte_event_eth_tx_adapter_free(uint8_t id);

int
rte_event_eth_tx_adapter_start(uint8_t id);

int
rte_event_eth_tx_adapter_stop(uint8_t id);

int
rte_event_eth_tx_adapter_queue_add(uint8_t id,
				uint16_t eth_dev_id,
				int32_t queue);

int
rte_event_eth_tx_adapter_queue_del(uint8_t id,
				uint16_t eth_dev_id,
				int32_t queue);

static __rte_always_inline void
rte_event_eth_tx_adapter_txq_set(struct rte_mbuf *pkt, uint16_t queue)
{
	pkt->hash.txadapter.txq = queue;
}

static __rte_always_inline uint16_t
rte_event_eth_tx_adapter_txq_get(struct rte_mbuf *pkt)
{
	return pkt->hash.txadapter.txq;
}

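/*
 * Usage sketch: record the destination Tx queue in the mbuf before the
 * event carrying it reaches the adapter, e.g. queue 0 for mbuf `m`:
 *
 *	rte_event_eth_tx_adapter_txq_set(m, 0);
 */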
int
rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);

/** All events in the enqueued burst target the same Tx port and queue */
#define RTE_EVENT_ETH_TX_ADAPTER_ENQUEUE_SAME_DEST	0x1

static inline uint16_t
rte_event_eth_tx_adapter_enqueue(uint8_t dev_id,
				uint8_t port_id,
				struct rte_event ev[],
				uint16_t nb_events,
				const uint8_t flags)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
		!rte_eventdevs[dev_id].attached) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_eth_tx_adapter_enqueue(dev_id, port_id, ev,
		nb_events, flags);
	if (flags)
		return dev->txa_enqueue_same_dest(dev->data->ports[port_id],
						  ev, nb_events);
	else
		return dev->txa_enqueue(dev->data->ports[port_id], ev,
					nb_events);
}

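/*
 * Worker-side sketch for the enqueue helper above; `dev_id`, `worker_port`
 * and the mbuf `m` are placeholders, and the event metadata chosen here
 * (NEW op, CPU event type, atomic scheduling) is only one plausible choice:
 *
 *	struct rte_event ev;
 *
 *	ev.event = 0;
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.mbuf = m;
 *	rte_event_eth_tx_adapter_txq_set(m, 0);
 *	rte_event_eth_tx_adapter_enqueue(dev_id, worker_port, &ev, 1, 0);
 */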
int
rte_event_eth_tx_adapter_stats_get(uint8_t id,
				struct rte_event_eth_tx_adapter_stats *stats);

int
rte_event_eth_tx_adapter_stats_reset(uint8_t id);

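/*
 * A small monitoring sketch (printf formatting is illustrative only,
 * <inttypes.h> assumed included):
 *
 *	struct rte_event_eth_tx_adapter_stats stats;
 *
 *	if (rte_event_eth_tx_adapter_stats_get(id, &stats) == 0)
 *		printf("tx %" PRIu64 " dropped %" PRIu64 " retries %" PRIu64 "\n",
 *		       stats.tx_packets, stats.tx_dropped, stats.tx_retry);
 *	rte_event_eth_tx_adapter_stats_reset(id);
 */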
int
rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id);

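/*
 * When the adapter runs as a software service rather than through a device
 * internal port, its service must be mapped to a service lcore.  A sketch,
 * assuming `service_lcore` was registered with rte_service_lcore_add():
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_eth_tx_adapter_service_id_get(id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, service_lcore, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */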
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENT_ETH_TX_ADAPTER_ */