DPDK  20.11.10
rte_eventdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
209 #ifdef __cplusplus
210 extern "C" {
211 #endif
212 
213 #include <rte_common.h>
214 #include <rte_config.h>
215 #include <rte_memory.h>
216 #include <rte_errno.h>
217 
218 #include "rte_eventdev_trace_fp.h"
219 
220 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221 struct rte_event;
222 
223 /* Event device capability bitmap flags */
224 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
225 
230 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
231 
237 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
238 
246 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
247 
253 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
254 
261 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
262 
272 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
273 
282 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
283 
288 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
289 
294 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
295 
300 /* Event device priority levels */
301 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
302 
306 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
307 
311 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
312 
324 uint8_t
325 rte_event_dev_count(void);
326 
337 int
338 rte_event_dev_get_dev_id(const char *name);
339 
350 int
351 rte_event_dev_socket_id(uint8_t dev_id);
352 
357  const char *driver_name;
358  struct rte_device *dev;
393  int32_t max_num_events;
398  uint32_t event_dev_cap;
406 };
407 
423 int
424 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
425 
429 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
430 
433 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
434 
437 #define RTE_EVENT_DEV_ATTR_STARTED 2
438 
451 int
452 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
453  uint32_t *attr_value);
454 
455 
456 /* Event device configuration bitmap flags */
457 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
458 
485  uint8_t nb_event_ports;
511  uint32_t event_dev_cfg;
521 };
522 
542 int
543 rte_event_dev_configure(uint8_t dev_id,
544  const struct rte_event_dev_config *dev_conf);
545 
546 /* Event queue specific APIs */
547 
548 /* Event queue configuration bitmap flags */
549 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
550 
555 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
556 
563  uint32_t nb_atomic_flows;
585  uint32_t event_queue_cfg;
587  uint8_t schedule_type;
592  uint8_t priority;
600 };
601 
624 int
625 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
626  struct rte_event_queue_conf *queue_conf);
627 
646 int
647 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
648  const struct rte_event_queue_conf *queue_conf);
649 
653 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
654 
657 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
658 
661 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
662 
665 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
666 
669 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
670 
691 int
692 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
693  uint32_t *attr_value);
694 
695 /* Event port specific APIs */
696 
697 /* Event port configuration bitmap flags */
698 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
699 
705 #define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
706 
726  uint16_t dequeue_depth;
732  uint16_t enqueue_depth;
738  uint32_t event_port_cfg;
739 };
740 
763 int
764 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
765  struct rte_event_port_conf *port_conf);
766 
787 int
788 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
789  const struct rte_event_port_conf *port_conf);
790 
794 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
795 
798 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
799 
802 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
803 
806 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
807 
824 int
825 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
826  uint32_t *attr_value);
827 
844 int
845 rte_event_dev_start(uint8_t dev_id);
846 
865 void
866 rte_event_dev_stop(uint8_t dev_id);
867 
/**
 * Callback invoked once per event flushed from the device during
 * rte_event_dev_stop().
 *
 * @param dev_id  The identifier of the device whose events are flushed.
 * @param event   The event being flushed (by value).
 * @param arg     User argument registered with the callback.
 */
typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
		void *arg);

/**
 * Register a callback to be invoked for each event flushed while the device
 * is stopped by rte_event_dev_stop().
 *
 * NOTE(review): function-name line was elided by the documentation
 * generator; prototype reconstructed from this page's cross-reference index.
 *
 * @param dev_id    The identifier of the device.
 * @param callback  Flush callback, or NULL to unregister — TODO confirm
 *                  NULL semantics against the implementation.
 * @param userdata  Opaque pointer passed through to the callback.
 * @return 0 on success, negative on error.
 */
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata);
913 int
914 rte_event_dev_close(uint8_t dev_id);
915 
916 /* Scheduler type definitions */
917 #define RTE_SCHED_TYPE_ORDERED 0
918 
944 #define RTE_SCHED_TYPE_ATOMIC 1
945 
963 #define RTE_SCHED_TYPE_PARALLEL 2
964 
976 /* Event types to classify the event source */
977 #define RTE_EVENT_TYPE_ETHDEV 0x0
978 
979 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
980 
981 #define RTE_EVENT_TYPE_TIMER 0x2
982 
983 #define RTE_EVENT_TYPE_CPU 0x3
984 
987 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
988 
989 #define RTE_EVENT_TYPE_MAX 0x10
990 
992 /* Event enqueue operations */
993 #define RTE_EVENT_OP_NEW 0
994 
997 #define RTE_EVENT_OP_FORWARD 1
998 
1005 #define RTE_EVENT_OP_RELEASE 2
1006 
/**
 * The generic rte_event structure holding the event attributes for the
 * dequeue and enqueue operations.  The structure is two 64-bit words:
 * WORD0 carries the event metadata, WORD1 carries the event payload.
 */
struct rte_event {
	/* WORD0 */
	union {
		/** Opaque view of the entire metadata word. */
		uint64_t event;
		/* Event attributes for dequeue or enqueue operation. */
		struct {
			/** Target flow identifier for the operation. */
			uint32_t flow_id:20;
			/** Sub-event type, qualified by the event source. */
			uint32_t sub_event_type:8;
			/** Event source classification (RTE_EVENT_TYPE_*). */
			uint32_t event_type:4;
			/** Enqueue operation type (RTE_EVENT_OP_*). */
			uint8_t op:2;
			/** Reserved for future use; must be zero. */
			uint8_t rsvd:4;
			/** Scheduler synchronization type (RTE_SCHED_TYPE_*). */
			uint8_t sched_type:2;
			/** Targeted event queue identifier. */
			uint8_t queue_id;
			/** Event priority relative to other events in the queue
			 * (RTE_EVENT_DEV_PRIORITY_HIGHEST..LOWEST).
			 */
			uint8_t priority;
			/** Implementation-opaque value; presumably must be
			 * preserved on RTE_EVENT_OP_FORWARD — verify against
			 * the driver contract.
			 */
			uint8_t impl_opaque;
		};
	};
	/* WORD1 */
	union {
		/** Opaque 64-bit payload value. */
		uint64_t u64;
		/** Opaque event pointer payload. */
		void *event_ptr;
		/** mbuf pointer payload, when the event carries an mbuf. */
		struct rte_mbuf *mbuf;
	};
};
1113 
1114 /* Ethdev Rx adapter capability bitmap flags */
1115 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1116 
1119 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1120 
1123 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1124 
1150 int
1151 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1152  uint32_t *caps);
1153 
1154 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1155 
1170 int
1171 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1172 
1173 /* Crypto adapter capability bitmap flag */
1174 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1175 
1181 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1182 
1188 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
1189 
1193 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
1194 
1218 int
1219 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1220  uint32_t *caps);
1221 
1222 /* Ethdev Tx adapter capability bitmap flags */
1223 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1224 
1244 int
1245 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1246  uint32_t *caps);
1247 
1248 struct rte_eventdev_ops;
1249 struct rte_eventdev;
1250 
1251 typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
1254 typedef uint16_t (*event_enqueue_burst_t)(void *port,
1255  const struct rte_event ev[], uint16_t nb_events);
1258 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
1259  uint64_t timeout_ticks);
1262 typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
1263  uint16_t nb_events, uint64_t timeout_ticks);
1266 typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
1267  struct rte_event ev[], uint16_t nb_events);
1270 typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
1271  struct rte_event ev[], uint16_t nb_events);
1276 #define RTE_EVENTDEV_NAME_MAX_LEN (64)
1277 
1286 struct rte_eventdev_data {
1287  int socket_id;
1289  uint8_t dev_id;
1291  uint8_t nb_queues;
1293  uint8_t nb_ports;
1295  void **ports;
1297  struct rte_event_port_conf *ports_cfg;
1299  struct rte_event_queue_conf *queues_cfg;
1301  uint16_t *links_map;
1303  void *dev_private;
1305  uint32_t event_dev_cap;
1307  struct rte_event_dev_config dev_conf;
1309  uint8_t service_inited;
1310  /* Service initialization state */
1311  uint32_t service_id;
1312  /* Service ID*/
1313  void *dev_stop_flush_arg;
1316  RTE_STD_C11
1317  uint8_t dev_started : 1;
1320  char name[RTE_EVENTDEV_NAME_MAX_LEN];
1323  uint64_t reserved_64s[4];
1324  void *reserved_ptrs[4];
1326 
1328 struct rte_eventdev {
1329  event_enqueue_t enqueue;
1331  event_enqueue_burst_t enqueue_burst;
1333  event_enqueue_burst_t enqueue_new_burst;
1335  event_enqueue_burst_t enqueue_forward_burst;
1337  event_dequeue_t dequeue;
1339  event_dequeue_burst_t dequeue_burst;
1341  event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
1345  event_tx_adapter_enqueue txa_enqueue;
1347  struct rte_eventdev_data *data;
1349  struct rte_eventdev_ops *dev_ops;
1351  struct rte_device *dev;
1354  RTE_STD_C11
1355  uint8_t attached : 1;
1358  uint64_t reserved_64s[4];
1359  void *reserved_ptrs[4];
1361 
1362 extern struct rte_eventdev *rte_eventdevs;
1365 static __rte_always_inline uint16_t
1366 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1367  const struct rte_event ev[], uint16_t nb_events,
1368  const event_enqueue_burst_t fn)
1369 {
1370  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1371 
1372 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1373  if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1374  rte_errno = EINVAL;
1375  return 0;
1376  }
1377 
1378  if (port_id >= dev->data->nb_ports) {
1379  rte_errno = EINVAL;
1380  return 0;
1381  }
1382 #endif
1383  rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
1384  /*
1385  * Allow zero cost non burst mode routine invocation if application
1386  * requests nb_events as const one
1387  */
1388  if (nb_events == 1)
1389  return (*dev->enqueue)(dev->data->ports[port_id], ev);
1390  else
1391  return fn(dev->data->ports[port_id], ev, nb_events);
1392 }
1393 
1437 static inline uint16_t
1438 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1439  const struct rte_event ev[], uint16_t nb_events)
1440 {
1441  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1442 
1443  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1444  dev->enqueue_burst);
1445 }
1446 
1488 static inline uint16_t
1489 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
1490  const struct rte_event ev[], uint16_t nb_events)
1491 {
1492  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1493 
1494  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1495  dev->enqueue_new_burst);
1496 }
1497 
1539 static inline uint16_t
1540 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
1541  const struct rte_event ev[], uint16_t nb_events)
1542 {
1543  const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1544 
1545  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1546  dev->enqueue_forward_burst);
1547 }
1548 
1574 int
1575 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1576  uint64_t *timeout_ticks);
1577 
1644 static inline uint16_t
1645 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
1646  uint16_t nb_events, uint64_t timeout_ticks)
1647 {
1648  struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1649 
1650 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1651  if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1652  rte_errno = EINVAL;
1653  return 0;
1654  }
1655 
1656  if (port_id >= dev->data->nb_ports) {
1657  rte_errno = EINVAL;
1658  return 0;
1659  }
1660 #endif
1661  rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
1662  /*
1663  * Allow zero cost non burst mode routine invocation if application
1664  * requests nb_events as const one
1665  */
1666  if (nb_events == 1)
1667  return (*dev->dequeue)(
1668  dev->data->ports[port_id], ev, timeout_ticks);
1669  else
1670  return (*dev->dequeue_burst)(
1671  dev->data->ports[port_id], ev, nb_events,
1672  timeout_ticks);
1673 }
1674 
1735 int
1736 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1737  const uint8_t queues[], const uint8_t priorities[],
1738  uint16_t nb_links);
1739 
1779 int
1780 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1781  uint8_t queues[], uint16_t nb_unlinks);
1782 
1804 int
1805 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1806 
1834 int
1835 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1836  uint8_t queues[], uint8_t priorities[]);
1837 
1853 int
1854 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1855 
1869 int
1870 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1871 
1873 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1874 
1879  RTE_EVENT_DEV_XSTATS_DEVICE,
1880  RTE_EVENT_DEV_XSTATS_PORT,
1881  RTE_EVENT_DEV_XSTATS_QUEUE,
1882 };
1883 
1891  char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1892 };
1893 
1926 int
1927 rte_event_dev_xstats_names_get(uint8_t dev_id,
1928  enum rte_event_dev_xstats_mode mode,
1929  uint8_t queue_port_id,
1930  struct rte_event_dev_xstats_name *xstats_names,
1931  unsigned int *ids,
1932  unsigned int size);
1933 
1960 int
1961 rte_event_dev_xstats_get(uint8_t dev_id,
1962  enum rte_event_dev_xstats_mode mode,
1963  uint8_t queue_port_id,
1964  const unsigned int ids[],
1965  uint64_t values[], unsigned int n);
1966 
1983 uint64_t
1984 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1985  unsigned int *id);
1986 
2007 int
2008 rte_event_dev_xstats_reset(uint8_t dev_id,
2009  enum rte_event_dev_xstats_mode mode,
2010  int16_t queue_port_id,
2011  const uint32_t ids[],
2012  uint32_t nb_ids);
2013 
2024 int rte_event_dev_selftest(uint8_t dev_id);
2025 
2026 #ifdef __cplusplus
2027 }
2028 #endif
2029 
2030 #endif /* _RTE_EVENTDEV_H_ */
void rte_event_dev_stop(uint8_t dev_id)
uint32_t min_dequeue_timeout_ns
Definition: rte_eventdev.h:359
int rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, uint64_t *timeout_ticks)
static uint16_t rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
#define __rte_always_inline
Definition: rte_common.h:231
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE
uint64_t u64
int rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, uint32_t *attr_value)
uint32_t flow_id
int rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, uint32_t *caps)
int rte_event_dev_stop_flush_callback_register(uint8_t dev_id, eventdev_stop_flush_t callback, void *userdata)
uint8_t priority
struct rte_device * dev
Definition: rte_eventdev.h:358
int rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, const struct rte_event_queue_conf *queue_conf)
uint8_t max_event_port_links
Definition: rte_eventdev.h:389
int rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, struct rte_event_queue_conf *queue_conf)
int rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, struct rte_event_port_conf *port_conf)
uint32_t dequeue_timeout_ns
Definition: rte_eventdev.h:363
#define rte_errno
Definition: rte_errno.h:29
uint32_t event_type
uint32_t event_dev_cap
Definition: rte_eventdev.h:398
int rte_event_dev_socket_id(uint8_t dev_id)
uint32_t max_event_port_enqueue_depth
Definition: rte_eventdev.h:384
uint64_t rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, unsigned int *id)
int rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
int rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
uint32_t nb_atomic_order_sequences
Definition: rte_eventdev.h:571
uint32_t nb_event_port_dequeue_depth
Definition: rte_eventdev.h:495
int rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
void * event_ptr
int rte_event_port_link(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links)
int rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, uint32_t *attr_value)
uint8_t max_single_link_event_port_queue_pairs
Definition: rte_eventdev.h:400
rte_event_dev_xstats_mode
int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[])
int rte_event_dev_selftest(uint8_t dev_id)
static uint16_t rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
int rte_event_dev_start(uint8_t dev_id)
int rte_event_port_setup(uint8_t dev_id, uint8_t port_id, const struct rte_event_port_conf *port_conf)
uint8_t max_event_port_dequeue_depth
Definition: rte_eventdev.h:379
int rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, uint32_t *attr_value)
uint32_t nb_event_port_enqueue_depth
Definition: rte_eventdev.h:503
const char * driver_name
Definition: rte_eventdev.h:357
uint8_t impl_opaque
uint8_t queue_id
static uint16_t rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks)
int rte_event_dev_get_dev_id(const char *name)
uint8_t rte_event_dev_count(void)
int rte_event_dev_close(uint8_t dev_id)
int rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, const unsigned int ids[], uint64_t values[], unsigned int n)
#define __rte_cache_aligned
Definition: rte_common.h:405
int rte_event_dev_xstats_reset(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, int16_t queue_port_id, const uint32_t ids[], uint32_t nb_ids)
#define RTE_STD_C11
Definition: rte_common.h:40
int rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, uint32_t *caps)
uint32_t dequeue_timeout_ns
Definition: rte_eventdev.h:464
int rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
static uint16_t rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
int rte_event_dev_dump(uint8_t dev_id, FILE *f)
uint8_t nb_single_link_event_port_queues
Definition: rte_eventdev.h:513
uint8_t rsvd
uint8_t op
int32_t new_event_threshold
Definition: rte_eventdev.h:713
uint8_t max_event_priority_levels
Definition: rte_eventdev.h:373
struct rte_mbuf * mbuf
uint32_t max_dequeue_timeout_ns
Definition: rte_eventdev.h:361
uint32_t max_event_queue_flows
Definition: rte_eventdev.h:367
int rte_event_dev_configure(uint8_t dev_id, const struct rte_event_dev_config *dev_conf)
void(* eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event, void *arg)
Definition: rte_eventdev.h:868
uint32_t sub_event_type
uint8_t sched_type
uint8_t max_event_queues
Definition: rte_eventdev.h:365
uint8_t max_event_queue_priority_levels
Definition: rte_eventdev.h:369
int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks)
int rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, uint32_t *caps)
int rte_event_dev_xstats_names_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, struct rte_event_dev_xstats_name *xstats_names, unsigned int *ids, unsigned int size)
uint32_t nb_event_queue_flows
Definition: rte_eventdev.h:490