DPDK 23.07.0
rte_eventdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc.
3  * Copyright(c) 2016-2018 Intel Corporation.
4  * Copyright 2016 NXP
5  * All rights reserved.
6  */
7 
8 #ifndef _RTE_EVENTDEV_H_
9 #define _RTE_EVENTDEV_H_
10 
208 #ifdef __cplusplus
209 extern "C" {
210 #endif
211 
212 #include <rte_compat.h>
213 #include <rte_common.h>
214 #include <rte_errno.h>
215 #include <rte_mbuf_pool_ops.h>
216 #include <rte_mempool.h>
217 
218 #include "rte_eventdev_trace_fp.h"
219 
220 struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221 struct rte_event;
222 
223 /* Event device capability bitmap flags */
224 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
225 
236 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
237 
243 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
244 
252 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
253 
259 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
260 
267 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
268 
278 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
279 
288 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
289 
294 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
295 
300 #define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
301 
306 #define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
307 
316 #define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
317 
323 /* Event device priority levels */
324 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
325 
329 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
330 
334 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
335 
340 /* Event queue scheduling weights */
341 #define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
342 
345 #define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
346 
350 /* Event queue scheduling affinity */
351 #define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
352 
355 #define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
356 
367 uint8_t
368 rte_event_dev_count(void);
369 
380 int
381 rte_event_dev_get_dev_id(const char *name);
382 
393 int
394 rte_event_dev_socket_id(uint8_t dev_id);
395 
400  const char *driver_name;
401  struct rte_device *dev;
436  int32_t max_num_events;
441  uint32_t event_dev_cap;
449 };
450 
465 int
466 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
467 
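
/*
 * Editor's sketch (not part of the header): enumerate the event devices in
 * the system and print a few of the fields declared above. Assumes
 * <stdio.h>, <inttypes.h> and <rte_eventdev.h> are included; error
 * handling is abbreviated.
 */
static void
show_event_devs(void)
{
	uint8_t nb_devs = rte_event_dev_count();

	for (uint8_t dev_id = 0; dev_id < nb_devs; dev_id++) {
		struct rte_event_dev_info info;

		if (rte_event_dev_info_get(dev_id, &info) == 0)
			printf("dev %u: driver %s, socket %d, caps 0x%" PRIx32 "\n",
			       dev_id, info.driver_name,
			       rte_event_dev_socket_id(dev_id),
			       info.event_dev_cap);
	}
}
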
471 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
472 
475 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
476 
479 #define RTE_EVENT_DEV_ATTR_STARTED 2
480 
493 int
494 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
495  uint32_t *attr_value);
496 
497 
498 /* Event device configuration bitmap flags */
499 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
500 
527  uint8_t nb_event_ports;
553  uint32_t event_dev_cfg;
563 };
564 
584 int
585 rte_event_dev_configure(uint8_t dev_id,
586  const struct rte_event_dev_config *dev_conf);
587 
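
/*
 * Editor's sketch: configure a device within the limits advertised by
 * rte_event_dev_info_get(). The queue/port counts of 2 are illustrative
 * assumptions, not values taken from this header.
 */
static int
configure_dev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;

	cfg.nb_event_queues = 2;   /* must be <= info.max_event_queues */
	cfg.nb_event_ports = 2;    /* must be <= info.max_event_ports */
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &cfg);
}
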
588 /* Event queue specific APIs */
589 
590 /* Event queue configuration bitmap flags */
591 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
592 
597 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
598 
605  uint32_t nb_atomic_flows;
627  uint32_t event_queue_cfg;
629  uint8_t schedule_type;
634  uint8_t priority;
642  uint8_t weight;
650  uint8_t affinity;
658 };
659 
681 int
682 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
683  struct rte_event_queue_conf *queue_conf);
684 
703 int
704 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
705  const struct rte_event_queue_conf *queue_conf);
706 
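
/*
 * Editor's sketch: start from the driver's default queue configuration and
 * override only the fields of interest; the atomic schedule type chosen
 * here is just an example.
 */
static int
setup_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}
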
710 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
711 
714 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
715 
718 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
719 
722 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
723 
726 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
727 
730 #define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
731 
734 #define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
735 
756 int
757 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
758  uint32_t *attr_value);
759 
778 __rte_experimental
779 int
780 rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
781  uint64_t attr_value);
782 
783 /* Event port specific APIs */
784 
785 /* Event port configuration bitmap flags */
786 #define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
787 
793 #define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
794 
798 #define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)
799 
808 #define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)
809 
819 #define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)
820 
846  uint16_t dequeue_depth;
852  uint16_t enqueue_depth;
858  uint32_t event_port_cfg;
859 };
860 
882 int
883 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
884  struct rte_event_port_conf *port_conf);
885 
906 int
907 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
908  const struct rte_event_port_conf *port_conf);
909 
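
/*
 * Editor's sketch: the same default-then-override pattern applies to
 * ports; here the driver defaults are used unchanged.
 */
static int
setup_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf pconf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);
	if (ret < 0)
		return ret;

	return rte_event_port_setup(dev_id, port_id, &pconf);
}
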
910 typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
911  struct rte_event event, void *arg);
941 __rte_experimental
942 void
943 rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
944  rte_eventdev_port_flush_t release_cb, void *args);
945 
949 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
950 
953 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
954 
957 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
958 
961 #define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
962 
979 int
980 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
981  uint32_t *attr_value);
982 
999 int
1000 rte_event_dev_start(uint8_t dev_id);
1001 
1020 void
1021 rte_event_dev_stop(uint8_t dev_id);
1022 
1023 typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1024  struct rte_event event, void *arg);
1054  rte_eventdev_stop_flush_t callback, void *userdata);
1055 
1067 int
1068 rte_event_dev_close(uint8_t dev_id);
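
/*
 * Editor's sketch: a stop-flush callback lets the application reclaim
 * events still buffered inside the device when rte_event_dev_stop() runs.
 * Freeing via rte_pktmbuf_free() (from <rte_mbuf.h>) assumes the events
 * carry mbufs, which this header does not mandate.
 */
static void
flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);
	rte_pktmbuf_free(ev.mbuf); /* assumption: mbuf-carrying events */
}

static void
shutdown_dev(uint8_t dev_id)
{
	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
	rte_event_dev_stop(dev_id);
	rte_event_dev_close(dev_id);
}
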
1074  uint16_t nb_elem;
1076  uint16_t elem_offset : 12;
1078  uint16_t rsvd : 3;
1080  uint16_t attr_valid : 1;
1083  union {
1084  /* Used by Rx/Tx adapter.
1085  * Indicates that all the elements in this vector belong to the
1086  * same port and queue pair when originating from Rx adapter,
1087  * valid only when event type is ETHDEV_VECTOR or
1088  * ETH_RX_ADAPTER_VECTOR.
1089  * Can also be used to indicate to the Tx adapter the destination
1090  * port and queue of the mbufs in the vector
1091  */
1092  struct {
1093  uint16_t port;
1094  /* Ethernet device port id. */
1095  uint16_t queue;
1096  /* Ethernet device queue id. */
1097  };
1098  };
1100  uint64_t impl_opaque;
1101 
1102 /* Empty structures do not have zero size in C++, leading to compilation errors
1103  * with clang about the structure having different sizes in C and C++.
1104  * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1105  * C++ builds, removing the warning.
1106  */
1107 #ifndef __cplusplus
1108 
1113  union {
1114 #endif
1115  struct rte_mbuf *mbufs[0];
1116  void *ptrs[0];
1117  uint64_t u64s[0];
1118 #ifndef __cplusplus
1119  } __rte_aligned(16);
1120 #endif
1121 
1125 } __rte_aligned(16);
1126 
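
/*
 * Editor's sketch: consume an event vector delivered in an event whose
 * event_type has the RTE_EVENT_TYPE_VECTOR bit set (ev->vec is the union
 * member carrying it). The valid elements are assumed to start at
 * elem_offset; handle_mbuf() is an application-defined placeholder.
 */
static void
drain_vector(struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;

	for (uint16_t i = 0; i < vec->nb_elem; i++)
		handle_mbuf(vec->mbufs[vec->elem_offset + i]); /* placeholder */
}
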
1127 /* Scheduler type definitions */
1128 #define RTE_SCHED_TYPE_ORDERED 0
1129 
1155 #define RTE_SCHED_TYPE_ATOMIC 1
1156 
1174 #define RTE_SCHED_TYPE_PARALLEL 2
1175 
1187 /* Event types to classify the event source */
1188 #define RTE_EVENT_TYPE_ETHDEV 0x0
1189 
1190 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
1191 
1192 #define RTE_EVENT_TYPE_TIMER 0x2
1193 
1194 #define RTE_EVENT_TYPE_CPU 0x3
1195 
1198 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
1199 
1200 #define RTE_EVENT_TYPE_VECTOR 0x8
1201 
1212 #define RTE_EVENT_TYPE_ETHDEV_VECTOR \
1213  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1214 
1215 #define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1216 
1217 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
1218  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1219 
1220 #define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
1221  (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1222 
1224 #define RTE_EVENT_TYPE_MAX 0x10
1225 
1227 /* Event enqueue operations */
1228 #define RTE_EVENT_OP_NEW 0
1229 
1232 #define RTE_EVENT_OP_FORWARD 1
1233 
1240 #define RTE_EVENT_OP_RELEASE 2
1241 
1277 struct rte_event {
1279  union {
1280  uint64_t event;
1282  struct {
1283  uint32_t flow_id:20;
1290  uint32_t sub_event_type:8;
1294  uint32_t event_type:4;
1298  uint8_t op:2;
1304  uint8_t rsvd:4;
1306  uint8_t sched_type:2;
1311  uint8_t queue_id;
1318  uint8_t priority;
1328  uint8_t impl_opaque;
1335  };
1336  };
1338  union {
1339  uint64_t u64;
1341  void *event_ptr;
1343  struct rte_mbuf *mbuf;
1347  };
1348 };
1349 
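
/*
 * Editor's sketch: populate a new event around an mbuf before enqueueing
 * it from a CPU core. Zeroing the metadata word first clears all bit
 * fields in one store; using the mbuf RSS hash as the flow key is an
 * assumption, not a requirement of this header.
 */
static inline void
make_event(struct rte_event *ev, struct rte_mbuf *m, uint8_t queue_id)
{
	ev->event = 0; /* clear flow_id/op/sched_type/... at once */
	ev->op = RTE_EVENT_OP_NEW;
	ev->queue_id = queue_id;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev->flow_id = m->hash.rss & 0xfffff; /* flow_id is 20 bits wide */
	ev->mbuf = m;
}
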
1350 /* Ethdev Rx adapter capability bitmap flags */
1351 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1352 
1355 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1356 
1359 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1360 
1366 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
1367 
1387 int
1388 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1389  uint32_t *caps);
1390 
1391 #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1392 
1394 #define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
1395 
1410 int
1411 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1412 
1413 /* Crypto adapter capability bitmap flag */
1414 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1415 
1421 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1422 
1428 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
1429 
1433 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
1434 
1438 #define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
1439 
1462 int
1463 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1464  uint32_t *caps);
1465 
1466 /* Ethdev Tx adapter capability bitmap flags */
1467 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1468 
1470 #define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
1471 
1491 int
1492 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1493  uint32_t *caps);
1494 
1519 int
1520 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1521  uint64_t *timeout_ticks);
1522 
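
/*
 * Editor's sketch: convert a nanosecond wait into device ticks once at
 * init time and reuse the result on the datapath; the 100 us figure is
 * illustrative.
 */
static uint64_t
dequeue_timeout(uint8_t dev_id)
{
	uint64_t ticks;

	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
		ticks = 0; /* conversion unsupported: fall back to polling */
	return ticks;
}
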
1582 int
1583 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1584  const uint8_t queues[], const uint8_t priorities[],
1585  uint16_t nb_links);
1586 
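
/*
 * Editor's sketch: link two queues to a worker port with distinct service
 * priorities. Passing NULL, NULL, 0 instead links every configured queue
 * at normal priority.
 */
static int
link_worker_port(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t queues[] = {0, 1};
	const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
				 RTE_EVENT_DEV_PRIORITY_NORMAL};

	return rte_event_port_link(dev_id, port_id, queues, prios, 2);
}
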
1626 int
1627 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1628  uint8_t queues[], uint16_t nb_unlinks);
1629 
1651 int
1652 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1653 
1680 int
1681 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1682  uint8_t queues[], uint8_t priorities[]);
1683 
1699 int
1700 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1701 
1715 int
1716 rte_event_dev_dump(uint8_t dev_id, FILE *f);
1717 
1719 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1720 
1725  RTE_EVENT_DEV_XSTATS_DEVICE,
1726  RTE_EVENT_DEV_XSTATS_PORT,
1727  RTE_EVENT_DEV_XSTATS_QUEUE,
1728 };
1729 
1737  char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
1738 };
1739 
1772 int
1773 rte_event_dev_xstats_names_get(uint8_t dev_id,
1774  enum rte_event_dev_xstats_mode mode,
1775  uint8_t queue_port_id,
1776  struct rte_event_dev_xstats_name *xstats_names,
1777  uint64_t *ids,
1778  unsigned int size);
1779 
1806 int
1807 rte_event_dev_xstats_get(uint8_t dev_id,
1808  enum rte_event_dev_xstats_mode mode,
1809  uint8_t queue_port_id,
1810  const uint64_t ids[],
1811  uint64_t values[], unsigned int n);
1812 
1829 uint64_t
1830 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1831  uint64_t *id);
1832 
1853 int
1854 rte_event_dev_xstats_reset(uint8_t dev_id,
1855  enum rte_event_dev_xstats_mode mode,
1856  int16_t queue_port_id,
1857  const uint64_t ids[],
1858  uint32_t nb_ids);
1859 
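
/*
 * Editor's sketch: the usual two-pass xstats pattern. The first call is a
 * size query (relying on the API returning the required count when the
 * buffer is too small), the second fetches names, ids and values. Assumes
 * <stdlib.h>, <stdio.h>, <inttypes.h>; allocation error handling is
 * abbreviated.
 */
static void
dump_dev_xstats(uint8_t dev_id)
{
	int n = rte_event_dev_xstats_names_get(dev_id,
			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
	if (n <= 0)
		return;

	struct rte_event_dev_xstats_name *names = calloc(n, sizeof(*names));
	uint64_t *ids = calloc(n, sizeof(*ids));
	uint64_t *vals = calloc(n, sizeof(*vals));

	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
				       0, names, ids, n);
	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
				 0, ids, vals, n);
	for (int i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, vals[i]);

	free(names);
	free(ids);
	free(vals);
}
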
1870 int rte_event_dev_selftest(uint8_t dev_id);
1871 
1902 struct rte_mempool *
1903 rte_event_vector_pool_create(const char *name, unsigned int n,
1904  unsigned int cache_size, uint16_t nb_elem,
1905  int socket_id);
1906 
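
/*
 * Editor's sketch: create a pool of event vectors able to hold up to 32
 * elements each; the pool name and sizes are illustrative, and
 * rte_socket_id() comes from <rte_lcore.h>.
 */
static struct rte_mempool *
create_vec_pool(void)
{
	return rte_event_vector_pool_create("evt_vec_pool", 16384, 128,
					    32, rte_socket_id());
}
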
1907 #include <rte_eventdev_core.h>
1908 
1909 static __rte_always_inline uint16_t
1910 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1911  const struct rte_event ev[], uint16_t nb_events,
1912  const event_enqueue_burst_t fn)
1913 {
1914  const struct rte_event_fp_ops *fp_ops;
1915  void *port;
1916 
1917  fp_ops = &rte_event_fp_ops[dev_id];
1918  port = fp_ops->data[port_id];
1919 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1920  if (dev_id >= RTE_EVENT_MAX_DEVS ||
1921  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
1922  rte_errno = EINVAL;
1923  return 0;
1924  }
1925 
1926  if (port == NULL) {
1927  rte_errno = EINVAL;
1928  return 0;
1929  }
1930 #endif
1931  rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
1932  /*
1933  * Allow zero-cost invocation of the single-event (non-burst) routine
1934  * when the application passes nb_events as the compile-time constant 1.
1935  */
1936  if (nb_events == 1)
1937  return (fp_ops->enqueue)(port, ev);
1938  else
1939  return fn(port, ev, nb_events);
1940 }
1941 
1985 static inline uint16_t
1986 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1987  const struct rte_event ev[], uint16_t nb_events)
1988 {
1989  const struct rte_event_fp_ops *fp_ops;
1990 
1991  fp_ops = &rte_event_fp_ops[dev_id];
1992  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1993  fp_ops->enqueue_burst);
1994 }
1995 
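
/*
 * Editor's sketch: rte_event_enqueue_burst() may accept fewer than
 * nb_events under backpressure, so a send-all helper retries the
 * remainder. A production loop would bound the retries and inspect
 * rte_errno.
 */
static void
enqueue_all(uint8_t dev_id, uint8_t port_id,
	    const struct rte_event *ev, uint16_t nb)
{
	uint16_t sent = 0;

	while (sent < nb)
		sent += rte_event_enqueue_burst(dev_id, port_id,
						ev + sent, nb - sent);
}
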
2037 static inline uint16_t
2038 rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2039  const struct rte_event ev[], uint16_t nb_events)
2040 {
2041  const struct rte_event_fp_ops *fp_ops;
2042 
2043  fp_ops = &rte_event_fp_ops[dev_id];
2044  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2045  fp_ops->enqueue_new_burst);
2046 }
2047 
2089 static inline uint16_t
2090 rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2091  const struct rte_event ev[], uint16_t nb_events)
2092 {
2093  const struct rte_event_fp_ops *fp_ops;
2094 
2095  fp_ops = &rte_event_fp_ops[dev_id];
2096  return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2097  fp_ops->enqueue_forward_burst);
2098 }
2099 
2166 static inline uint16_t
2167 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2168  uint16_t nb_events, uint64_t timeout_ticks)
2169 {
2170  const struct rte_event_fp_ops *fp_ops;
2171  void *port;
2172 
2173  fp_ops = &rte_event_fp_ops[dev_id];
2174  port = fp_ops->data[port_id];
2175 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2176  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2177  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2178  rte_errno = EINVAL;
2179  return 0;
2180  }
2181 
2182  if (port == NULL) {
2183  rte_errno = EINVAL;
2184  return 0;
2185  }
2186 #endif
2187  rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2188  /*
2189  * Allow zero-cost invocation of the single-event (non-burst) routine
2190  * when the application passes nb_events as the compile-time constant 1.
2191  */
2192  if (nb_events == 1)
2193  return (fp_ops->dequeue)(port, ev, timeout_ticks);
2194  else
2195  return (fp_ops->dequeue_burst)(port, ev, nb_events,
2196  timeout_ticks);
2197 }
2198 
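
/*
 * Editor's sketch of a worker loop: dequeue a burst, process each event,
 * then forward it to the next stage queue. process_event() and
 * NEXT_QUEUE_ID are application-defined placeholders; the forwarding
 * enqueue is shown unchecked for brevity (see the retry helper above).
 */
static void
worker_loop(uint8_t dev_id, uint8_t port_id, uint64_t timeout_ticks)
{
	struct rte_event ev[32];

	for (;;) {
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id, ev,
						     RTE_DIM(ev),
						     timeout_ticks);

		for (uint16_t i = 0; i < n; i++) {
			process_event(&ev[i]);           /* placeholder */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = NEXT_QUEUE_ID;  /* placeholder */
		}
		if (n > 0)
			rte_event_enqueue_burst(dev_id, port_id, ev, n);
	}
}
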
2199 #define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
2200 
2241 __rte_experimental
2242 static inline int
2243 rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2244 {
2245  const struct rte_event_fp_ops *fp_ops;
2246  void *port;
2247 
2248  fp_ops = &rte_event_fp_ops[dev_id];
2249  port = fp_ops->data[port_id];
2250 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2251  if (dev_id >= RTE_EVENT_MAX_DEVS ||
2252  port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2253  return -EINVAL;
2254 
2255  if (port == NULL)
2256  return -EINVAL;
2257 
2258  if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2259  return -EINVAL;
2260 #endif
2261  rte_eventdev_trace_maintain(dev_id, port_id, op);
2262 
2263  if (fp_ops->maintain != NULL)
2264  fp_ops->maintain(port, op);
2265 
2266  return 0;
2267 }
2268 
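
/*
 * Editor's sketch: on devices lacking RTE_EVENT_DEV_CAP_MAINTENANCE_FREE,
 * a port that has gone idle must still be maintained periodically. Op 0
 * performs the bare minimum, while RTE_EVENT_DEV_MAINT_OP_FLUSH also
 * flushes events buffered on the port.
 */
static void
idle_port_tick(uint8_t dev_id, uint8_t port_id)
{
	(void)rte_event_maintain(dev_id, port_id,
				 RTE_EVENT_DEV_MAINT_OP_FLUSH);
}
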
2269 #ifdef __cplusplus
2270 }
2271 #endif
2272 
2273 #endif /* _RTE_EVENTDEV_H_ */