DPDK 21.08.0
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
/**< Event scheduling prioritization is based on the priority associated with
 * each event queue.
 */

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 * each event.
 */

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
/**< Event scheduling happens in hardware and/or from within
 * rte_event_dequeue_burst(); no dedicated scheduling service core is needed.
 */

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< A single event queue can accept events of all supported schedule types. */

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< The device natively supports burst enqueue and dequeue operations. */

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/**< Ports can disable the implicit release of the previous event on dequeue. */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/**< The device operates in non-sequential mode. */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/**< Ports can be linked to and unlinked from queues at runtime. */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Multiple queues can be linked to a single event port. */

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< The flow ID of an enqueued event is preserved in the dequeued event. */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority; a numerically lower value means higher priority. */

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
/**< Normal priority. */

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
/**< Lowest priority. */

/** Get the total number of event devices that have been initialised. */
uint8_t
rte_event_dev_count(void);

/** Get the device ID of the event device named @p name. */
int
rte_event_dev_get_dev_id(const char *name);

/** Return the NUMA socket to which an event device is connected. */
int
rte_event_dev_socket_id(uint8_t dev_id);

/** Event device information */
struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name */
	struct rte_device *dev;	/**< Device information */
	uint32_t min_dequeue_timeout_ns;	/**< Minimum global dequeue timeout */
	uint32_t max_dequeue_timeout_ns;	/**< Maximum global dequeue timeout */
	uint32_t dequeue_timeout_ns;	/**< Configured global dequeue timeout */
	uint8_t max_event_queues;	/**< Maximum event queues supported */
	uint32_t max_event_queue_flows;	/**< Maximum flows per event queue */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum event queue priority levels */
	uint8_t max_event_priority_levels;	/**< Maximum event priority levels */
	uint8_t max_event_ports;	/**< Maximum event ports supported */
	uint8_t max_event_port_dequeue_depth;	/**< Maximum dequeue depth per port */
	uint32_t max_event_port_enqueue_depth;	/**< Maximum enqueue depth per port */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a port */
	int32_t max_num_events;	/**< Maximum number of events in flight */
	uint32_t event_dev_cap;	/**< Capabilities (RTE_EVENT_DEV_CAP_*) */
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum single-link port/queue pairs */
};

/** Retrieve the contextual information of an event device. */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/** The count of ports configured on the device */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

/** The count of queues configured on the device */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

/** Whether the device has been started (1) or not (0) */
#define RTE_EVENT_DEV_ATTR_STARTED 2

/** Get an attribute of an event device. */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global dequeue timeout with a per-dequeue timeout argument. */

/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;	/**< Global dequeue timeout, in ns */
	int32_t nb_events_limit;	/**< Limit on in-flight events */
	uint8_t nb_event_queues;	/**< Number of event queues to configure */
	uint8_t nb_event_ports;	/**< Number of event ports to configure */
	uint32_t nb_event_queue_flows;	/**< Number of flows per event queue */
	uint32_t nb_event_port_dequeue_depth;	/**< Dequeue depth per port */
	uint32_t nb_event_port_enqueue_depth;	/**< Enqueue depth per port */
	uint32_t event_dev_cfg;	/**< Configuration flags (RTE_EVENT_DEV_CFG_*) */
	uint8_t nb_single_link_event_port_queues;
	/**< Number of single-link port/queue pairs */
};

/** Configure an event device; must be called before starting the device. */
int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

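/*
 * A minimal configuration sketch (illustrative; the values are examples, not
 * recommended defaults): size the device from its advertised limits.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_panic("cannot configure eventdev\n");
 */
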
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow events of all schedule types on this queue; schedule_type is
 * ignored. Valid only when the device advertises
 * RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES.
 */

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< The queue is linked to exactly one event port. */

/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< Number of atomic flows for RTE_SCHED_TYPE_ATOMIC */
	uint32_t nb_atomic_order_sequences;
	/**< Number of order sequences for RTE_SCHED_TYPE_ORDERED */
	uint32_t event_queue_cfg;	/**< Queue flags (RTE_EVENT_QUEUE_CFG_*) */
	uint8_t schedule_type;	/**< Queue schedule type (RTE_SCHED_TYPE_*) */
	uint8_t priority;	/**< Queue priority relative to other queues */
};

/** Retrieve the default configuration of an event queue. */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

/** Allocate and set up an event queue. */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

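/*
 * A usage sketch (illustrative): start from the driver's default queue
 * configuration and override selected fields before setup.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	rte_event_queue_setup(dev_id, 0, &qconf);
 */
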
/** The priority of the queue */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

/** The number of atomic flows configured for the queue */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

/** The number of atomic order sequences configured for the queue */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

/** The queue configuration flags */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

/** The schedule type of the queue */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

/** Get an attribute of an event queue. */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
/**< Disable the implicit release of the previous event on dequeue. */

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
/**< The port is linked to exactly one event queue. */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< Backpressure threshold for RTE_EVENT_OP_NEW enqueues */
	uint16_t dequeue_depth;	/**< Maximum events dequeued per call */
	uint16_t enqueue_depth;	/**< Maximum events enqueued per call */
	uint32_t event_port_cfg;	/**< Port flags (RTE_EVENT_PORT_CFG_*) */
};

/** Retrieve the default configuration of an event port. */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

/** Allocate and set up an event port. */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

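/*
 * A usage sketch (illustrative): set up a port with the default configuration
 * and read back its dequeue depth attribute.
 *
 *	struct rte_event_port_conf pconf;
 *	uint32_t deq_depth;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	rte_event_port_setup(dev_id, 0, &pconf);
 *	rte_event_port_attr_get(dev_id, 0, RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
 *				&deq_depth);
 */
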
/** The maximum size of burst enqueue supported on the port */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

/** The maximum size of burst dequeue supported on the port */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

/** The new-event threshold of the port */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

/** Whether implicit release is disabled on the port */
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

/** Get an attribute of an event port. */
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

/** Start an event device; the last step before events can be scheduled. */
int
rte_event_dev_start(uint8_t dev_id);

/** Stop an event device. */
void
rte_event_dev_stop(uint8_t dev_id);

/** Callback invoked for each event remaining in the device on stop. */
typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
		void *arg);

/** Register a callback to flush events remaining in the device on stop. */
int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata);

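/*
 * A usage sketch (illustrative): register a flush callback so events still
 * buffered inside the device are handed back on stop. Treating event_ptr as
 * a heap pointer is an assumption for illustration only.
 *
 *	static void
 *	flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
 *	{
 *		if (ev.event_type == RTE_EVENT_TYPE_CPU)
 *			free(ev.event_ptr);   // assumption: app-owned object
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 *	rte_event_dev_stop(dev_id);
 */
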
/** Close an event device; the device must be stopped first. */
int
rte_event_dev_close(uint8_t dev_id);

/** Event vector structure: a batch of objects delivered as one event. */
struct rte_event_vector {
	uint16_t nb_elem;
	/**< Number of elements in this event vector. */
	uint16_t rsvd : 15;
	/**< Reserved for future use. */
	uint16_t attr_valid : 1;
	/**< Indicates that the union attributes below hold valid values. */
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from the Rx
		 * adapter; valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the
		 * destination port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			/* Ethernet device port id. */
			uint16_t queue;
			/* Ethernet device queue id. */
		};
	};
	uint64_t impl_opaque;
	/**< Implementation-specific opaque value; preserve it on forward. */
	union {
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t *u64s[0];
	} __rte_aligned(16);
	/**< Start of the vector array. */
};

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Ordered scheduling: events of a flow may be processed in parallel on
 * multiple ports, but the original enqueue order is restored when the events
 * are forwarded to the next queue.
 */

#define RTE_SCHED_TYPE_ATOMIC 1
/**< Atomic scheduling: events of a given flow are scheduled to at most one
 * port at a time, providing per-flow exclusion and ordering.
 */

#define RTE_SCHED_TYPE_PARALLEL 2
/**< Parallel scheduling: events are scheduled with no flow exclusion or
 * ordering guarantees.
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
/**< The event generated from the ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event generated from the cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMER 0x2
/**< The event generated from the event timer adapter */
#define RTE_EVENT_TYPE_CPU 0x3
/**< The event generated from the CPU */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/**< The event generated from the event eth Rx adapter */
#define RTE_EVENT_TYPE_VECTOR 0x8
/**< Indicates that the event carries a vector of objects */

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
/**< The event vector generated from the ethdev subsystem */
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
/**< The event vector generated from the CPU */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
/**< The event vector generated from the eth Rx adapter */

#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
/**< Inject a new event into the event device. */

#define RTE_EVENT_OP_FORWARD 1
/**< Forward a previously dequeued event to its next stage. */

#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow or ordering context acquired on dequeue without
 * forwarding the event.
 */

/** The generic rte_event structure carried through the eventdev pipeline. */
struct rte_event {
	/** WORD0 */
	union {
		uint64_t event;
		/** Event attributes for the dequeue or enqueue operation */
		struct {
			uint32_t flow_id:20;
			/**< Target flow identifier. */
			uint32_t sub_event_type:8;
			/**< Sub-event type, based on the event source. */
			uint32_t event_type:4;
			/**< Event source classification (RTE_EVENT_TYPE_*). */
			uint8_t op:2;
			/**< Enqueue operation type (RTE_EVENT_OP_*). */
			uint8_t rsvd:4;
			/**< Reserved for future use. */
			uint8_t sched_type:2;
			/**< Scheduler synchronization type (RTE_SCHED_TYPE_*). */
			uint8_t queue_id;
			/**< Target event queue identifier. */
			uint8_t priority;
			/**< Event priority relative to other events in the same queue. */
			uint8_t impl_opaque;
			/**< Implementation-specific opaque value; preserve it on forward. */
		};
	};
	/** WORD1 */
	union {
		uint64_t u64;
		/**< Opaque 64-bit value */
		void *event_ptr;
		/**< Opaque event pointer */
		struct rte_mbuf *mbuf;
		/**< mbuf pointer, if the event is associated with an mbuf */
		struct rte_event_vector *vec;
		/**< Event vector pointer. */
	};
};

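/*
 * A usage sketch (illustrative): populate a new CPU-generated event carrying
 * an mbuf. All field values below are example choices.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.queue_id = 0;
 *	ev.flow_id = 0xdead;   // app-chosen flow identifier (20-bit field)
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;           // a previously allocated struct rte_mbuf *
 */
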
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< Packets can reach the event device without a service core. */

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
/**< Rx queues of an ethdev can be mapped to distinct event queues. */

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
/**< The application can override the flow ID of Rx events. */

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
/**< The adapter can aggregate packets into event vectors. */

/** Retrieve the event device's ethdev Rx adapter capabilities. */
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< Timer events reach the event device without a service core. */

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
/**< The timer adapter supports periodic timer mode. */

/** Retrieve the event device's timer adapter capabilities. */
int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flag */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< Crypto completions can be enqueued internally as RTE_EVENT_OP_NEW. */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< Crypto completions can be forwarded internally (RTE_EVENT_OP_FORWARD). */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
/**< Crypto queue pairs can be bound to event queues. */

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
/**< Event metadata can be stored in the crypto session private data. */

/** Retrieve the event device's crypto adapter capabilities. */
int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< Events can be transmitted without a service core. */

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
/**< The adapter can transmit event vectors. */

/** Retrieve the event device's ethdev Tx adapter capabilities. */
int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

struct rte_eventdev_ops;
struct rte_eventdev;

/** @internal Enqueue an event on a port of a device. */
typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);

/** @internal Enqueue a burst of events on a port of a device. */
typedef uint16_t (*event_enqueue_burst_t)(void *port,
		const struct rte_event ev[], uint16_t nb_events);

/** @internal Dequeue an event from a port of a device. */
typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);

/** @internal Dequeue a burst of events from a port of a device. */
typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);

/** @internal Enqueue a burst of events on a Tx adapter port. */
typedef uint16_t (*event_tx_adapter_enqueue)(void *port,
		struct rte_event ev[], uint16_t nb_events);

/** @internal Tx adapter enqueue where all events share one destination. */
typedef uint16_t (*event_tx_adapter_enqueue_same_dest)(void *port,
		struct rte_event ev[], uint16_t nb_events);

/** @internal Enqueue a burst of events on a crypto adapter port. */
typedef uint16_t (*event_crypto_adapter_enqueue)(void *port,
		struct rte_event ev[], uint16_t nb_events);

#define RTE_EVENTDEV_NAME_MAX_LEN (64)
/**< @internal Maximum length of the event device name. */

/** @internal Data associated with each event device, shared between primary
 * and secondary processes.
 */
struct rte_eventdev_data {
	int socket_id;	/**< Socket ID where memory is allocated */
	uint8_t dev_id;	/**< Device ID for this instance */
	uint8_t nb_queues;	/**< Number of event queues */
	uint8_t nb_ports;	/**< Number of event ports */
	void **ports;	/**< Array of pointers to ports */
	struct rte_event_port_conf *ports_cfg;	/**< Per-port configuration */
	struct rte_event_queue_conf *queues_cfg;	/**< Per-queue configuration */
	uint16_t *links_map;	/**< Queue-to-port link map */
	void *dev_private;	/**< PMD-specific private data */
	uint32_t event_dev_cap;	/**< Event device capabilities */
	struct rte_event_dev_config dev_conf;	/**< Configuration applied to device */
	uint8_t service_inited;
	/* Service initialization state */
	uint32_t service_id;
	/* Service ID */
	void *dev_stop_flush_arg;
	/**< User-provided argument for the device stop flush callback */

	RTE_STD_C11
	uint8_t dev_started : 1;
	/**< Device state: STARTED(1)/STOPPED(0) */

	char name[RTE_EVENTDEV_NAME_MAX_LEN];
	/**< Unique identifier name */

	uint64_t reserved_64s[4];	/**< Reserved for future fields */
	void *reserved_ptrs[4];	/**< Reserved for future fields */
} __rte_cache_aligned;

/** @internal The generic data structure associated with each event device. */
struct rte_eventdev {
	event_enqueue_t enqueue;	/**< Pointer to PMD enqueue function */
	event_enqueue_burst_t enqueue_burst;
	/**< Pointer to PMD enqueue burst function */
	event_enqueue_burst_t enqueue_new_burst;
	/**< Pointer to PMD enqueue burst function (op = NEW) */
	event_enqueue_burst_t enqueue_forward_burst;
	/**< Pointer to PMD enqueue burst function (op = FORWARD) */
	event_dequeue_t dequeue;	/**< Pointer to PMD dequeue function */
	event_dequeue_burst_t dequeue_burst;
	/**< Pointer to PMD dequeue burst function */
	event_tx_adapter_enqueue_same_dest txa_enqueue_same_dest;
	/**< Pointer to PMD Tx adapter enqueue for events with one destination */
	event_tx_adapter_enqueue txa_enqueue;
	/**< Pointer to PMD Tx adapter enqueue function */
	struct rte_eventdev_data *data;	/**< Pointer to device data */
	struct rte_eventdev_ops *dev_ops;	/**< Functions exported by PMD */
	struct rte_device *dev;	/**< Backing device */

	RTE_STD_C11
	uint8_t attached : 1;	/**< Flag indicating the device is attached */

	event_crypto_adapter_enqueue ca_enqueue;
	/**< Pointer to PMD crypto adapter enqueue function */

	uint64_t reserved_64s[4];	/**< Reserved for future fields */
	void *reserved_ptrs[3];	/**< Reserved for future fields */
} __rte_cache_aligned;

/** @internal The pool of rte_eventdev structures. */
extern struct rte_eventdev *rte_eventdevs;

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			  const struct rte_event ev[], uint16_t nb_events,
			  const event_enqueue_burst_t fn)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, fn);
	/*
	 * Allow the zero-cost non-burst path when the application passes
	 * nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	else
		return fn(dev->data->ports[port_id], ev, nb_events);
}

/** Enqueue a burst of events to an event port. */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_burst);
}

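/*
 * A usage sketch (illustrative): a short enqueue is legal when the device
 * backpressures, so resubmit the remainder. A real application should bound
 * this retry loop.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						ev + sent, n - sent);
 */
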
/** Enqueue a burst of events, all with op set to RTE_EVENT_OP_NEW. */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_new_burst);
}

/** Enqueue a burst of events, all with op set to RTE_EVENT_OP_FORWARD. */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_forward_burst);
}

/** Convert a dequeue timeout from nanoseconds to device-specific ticks. */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

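/*
 * A usage sketch (illustrative): convert a wall-clock timeout to device ticks
 * before calling rte_event_dequeue_burst().
 *
 *	uint64_t ticks;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) != 0)
 *		ticks = 0;   // 100 us requested; fall back to non-blocking
 */
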
/** Dequeue a burst of events from an event port. */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow the zero-cost non-burst path when the application passes
	 * nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (*dev->dequeue)(
			dev->data->ports[port_id], ev, timeout_ticks);
	else
		return (*dev->dequeue_burst)(
			dev->data->ports[port_id], ev, nb_events,
			timeout_ticks);
}

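/*
 * A minimal worker-loop sketch (illustrative): process_event(), next_qid and
 * the done flag are hypothetical application code, and partial enqueues are
 * ignored for brevity.
 *
 *	struct rte_event ev[16];
 *	uint16_t i, nb;
 *
 *	while (!done) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev, 16, ticks);
 *		for (i = 0; i < nb; i++) {
 *			process_event(&ev[i]);     // hypothetical handler
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *			ev[i].queue_id = next_qid; // hypothetical next stage
 *		}
 *		if (nb)
 *			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 */
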
/** Link event queues to an event port; NULL @p queues and @p priorities
 * links all configured queues at normal priority.
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links);

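/*
 * A usage sketch (illustrative): passing NULL for both arrays links the port
 * to every configured queue at normal service priority.
 *
 *	if (rte_event_port_link(dev_id, port_id, NULL, NULL, 0) < 0)
 *		rte_panic("port link failed\n");
 */
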
/** Unlink event queues from an event port; NULL @p queues unlinks all. */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks);

/** Return the number of unlink operations still in progress on a port. */
int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

/** Retrieve the queues and priorities currently linked to a port. */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[]);

/** Retrieve the service ID of the device, if it uses a service core. */
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

/** Dump internal information about the event device to a file. */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

/** Maximum name length for extended statistics counters. */
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/** Selects the component of the eventdev to retrieve statistics from. */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,	/**< Device-level statistics */
	RTE_EVENT_DEV_XSTATS_PORT,	/**< Per-port statistics */
	RTE_EVENT_DEV_XSTATS_QUEUE,	/**< Per-queue statistics */
};

/** A name-key lookup element for extended statistics. */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

/** Retrieve the names of the extended statistics of an event device. */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
			       enum rte_event_dev_xstats_mode mode,
			       uint8_t queue_port_id,
			       struct rte_event_dev_xstats_name *xstats_names,
			       unsigned int *ids,
			       unsigned int size);

/** Retrieve extended statistics of an event device. */
int
rte_event_dev_xstats_get(uint8_t dev_id,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 const unsigned int ids[],
			 uint64_t values[], unsigned int n);

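/*
 * A usage sketch (illustrative): fetch all device-level xstats. The array
 * size of 128 is an arbitrary example.
 *
 *	struct rte_event_dev_xstats_name names[128];
 *	unsigned int ids[128];
 *	uint64_t values[128];
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 128);
 *
 *	if (n > 0 && n <= 128)
 *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *					 0, ids, values, n);
 */
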
/** Retrieve the value of a single extended statistic by name. */
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
				 unsigned int *id);

/** Reset the xstats of the selected component of the device. */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[],
			   uint32_t nb_ids);

/** Trigger the PMD self test. */
int rte_event_dev_selftest(uint8_t dev_id);

/** Create a mempool of rte_event_vector objects holding up to @p nb_elem
 * entries each.
 */
__rte_experimental
struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
			     unsigned int cache_size, uint16_t nb_elem,
			     int socket_id);

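/*
 * A usage sketch (illustrative): create a pool of 4096 event vectors, each
 * able to carry up to 32 pointers; the sizes are example values.
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("ev_vec_pool", 4096, 64, 32,
 *					  rte_socket_id());
 *	if (vp == NULL)
 *		rte_panic("cannot create event vector pool\n");
 */
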
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */