DPDK 23.03.0
rte_eventdev.h

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
/**< Event scheduling prioritization is based on the priority of the
 * event queue.
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
/**< Event scheduling prioritization is based on the priority of each
 * individual event.
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
/**< Event scheduling happens in hardware or as part of
 * rte_event_dequeue_burst(), not on a dedicated scheduler core.
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event queues can accept events of all scheduling types
 * (see RTE_EVENT_QUEUE_CFG_ALL_TYPES).
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device supports burst mode for enqueue and dequeue operations. */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/**< Event ports support disabling the implicit release of events on
 * dequeue (see RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL).
 */
#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/**< Events need not follow a fixed sequential path through the queues;
 * the application may route an event to any queue at runtime.
 */
#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/**< Queue/port links can be changed while the device is running. */
#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< A single event port can be linked to multiple event queues. */
#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
/**< The device carries the flow ID from the enqueued event through to
 * the dequeued event.
 */
#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
/**< The device does not require rte_event_maintain() calls on its ports. */
#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
/**< Queue attributes can be changed at runtime via
 * rte_event_queue_attr_set().
 */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority level (numerically lowest). */
#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
/**< Normal priority level. */
#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
/**< Lowest priority level (numerically highest). */

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name */
	struct rte_device *dev;		/**< Device information */
	uint32_t min_dequeue_timeout_ns;
	/**< Minimum supported global dequeue timeout(ns) by this device */
	uint32_t max_dequeue_timeout_ns;
	/**< Maximum supported global dequeue timeout(ns) by this device */
	uint32_t dequeue_timeout_ns;
	/**< Configured global dequeue timeout(ns) for this device */
	uint8_t max_event_queues;
	/**< Maximum event queues supported by this device */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows within an event queue */
	uint8_t max_event_queue_priority_levels;
	/**< Maximum number of event queue priority levels */
	uint8_t max_event_priority_levels;
	/**< Maximum number of event priority levels */
	uint8_t max_event_ports;
	/**< Maximum event ports supported by this device */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time */
	uint8_t max_event_port_links;
	/**< Maximum number of queues that can be linked to a single port */
	int32_t max_num_events;
	/**< Maximum number of events that can be queued in the device */
	uint32_t event_dev_cap;
	/**< Event device capability flags (RTE_EVENT_DEV_CAP_*) */
	uint8_t max_single_link_event_port_queue_pairs;
	/**< Maximum number of port/queue pairs optimized for single-link
	 * usage
	 */
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/* Event device attribute IDs */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**< The count of ports configured on the device. */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**< The count of queues configured on the device. */
#define RTE_EVENT_DEV_ATTR_STARTED 2
/**< Whether the device has been started: 1 if started, 0 otherwise. */

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global dequeue timeout with the value supplied to each
 * rte_event_dequeue_burst() call.
 */

struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout in ns for this device. */
	int32_t nb_events_limit;
	/**< Maximum number of events that can be inflight in the device at
	 * a given time.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device. */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device. */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device. */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 */
	uint32_t event_dev_cfg;
	/**< Event device config flags (RTE_EVENT_DEV_CFG_*). */
	uint8_t nb_single_link_event_port_queues;
	/**< Number of event ports and queues that will be singly-linked to
	 * each other.
	 */
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow events of ATOMIC, ORDERED and PARALLEL scheduling types on
 * this queue; the schedule_type field is then ignored.
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port. */

struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time; valid when the queue schedule type is ATOMIC.
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be
	 * reordered by this queue; valid when the queue schedule type is
	 * ORDERED.
	 */
	uint32_t event_queue_cfg;
	/**< Queue cfg flags (RTE_EVENT_QUEUE_CFG_*). */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*); valid when
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES is not set in event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority for this event queue relative to other event queues. */
	uint8_t weight;
	/**< Weight of this event queue relative to other event queues. */
	uint8_t affinity;
	/**< Affinity of this event queue relative to other event queues. */
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

/* Event queue attribute IDs */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

__rte_experimental
int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);
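
/*
 * Example: a minimal sketch of raising a queue's priority at runtime.
 * This only works on devices advertising
 * RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR.
 *
 * @code
 *	struct rte_event_dev_info info;
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR)
 *		rte_event_queue_attr_set(dev_id, 0,
 *				RTE_EVENT_QUEUE_ATTR_PRIORITY,
 *				RTE_EVENT_DEV_PRIORITY_HIGHEST);
 * @endcode
 */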

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
/**< Configure the port not to release outstanding events implicitly in
 * rte_event_dequeue_burst(); each dequeued event must then be released
 * explicitly with RTE_EVENT_OP_RELEASE or RTE_EVENT_OP_FORWARD.
 */
#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
/**< This event port links only to a single event queue. */
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)
/**< Hint that this event port will primarily enqueue events to the device. */
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)
/**< Hint that this event port will primarily dequeue events from the device. */
#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)
/**< Hint that this event port will both dequeue events from and enqueue
 * events to the device.
 */

struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< Backpressure threshold for new (RTE_EVENT_OP_NEW) event
	 * enqueues on this port.
	 */
	uint16_t dequeue_depth;
	/**< Configure the number of bulk dequeues for this event port. */
	uint16_t enqueue_depth;
	/**< Configure the number of bulk enqueues for this event port. */
	uint32_t event_port_cfg;
	/**< Port cfg flags (RTE_EVENT_PORT_CFG_*). */
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);
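
/*
 * Example: a minimal sketch of setting up port 0 with the driver
 * defaults.
 *
 * @code
 *	struct rte_event_port_conf conf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &conf);
 *	if (rte_event_port_setup(dev_id, 0, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "port setup failed\n");
 * @endcode
 */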

typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);
/**< Callback invoked once per event flushed when a port is quiesced. */

__rte_experimental
void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

/* Event port attribute IDs */
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/**< The maximum size of a burst enqueue to the port. */
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/**< The maximum size of a burst dequeue from the port. */
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/**< The new event threshold of the port. */
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
/**< Whether implicit release is disabled on the port: 1 if disabled,
 * 0 otherwise.
 */

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);
/**< Callback invoked once per event flushed when the device is stopped. */

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);
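
/*
 * Example: a minimal sketch of a stop-flush callback that returns
 * flushed mbufs to their pool. The callback name is an assumption for
 * illustration.
 *
 * @code
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		if (event.event_type == RTE_EVENT_TYPE_ETHDEV)
 *			rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 *	rte_event_dev_stop(dev_id);
 * @endcode
 */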

struct rte_event_vector {
	uint16_t nb_elem;
	/**< Number of elements valid in this event vector. */
	uint16_t elem_offset : 12;
	/**< Offset into the vector array where valid elements start. */
	uint16_t rsvd : 3;
	/**< Reserved for future use. */
	uint16_t attr_valid : 1;
	/**< Indicates that the below union attributes have valid values. */
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from Rx adapter,
		 * valid only when event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the
		 * destination port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;  /* Ethernet device port id. */
			uint16_t queue; /* Ethernet device queue id. */
		};
	};
	uint64_t impl_opaque;
	/**< Implementation-specific opaque value shared between dequeue and
	 * enqueue; the application must not modify it.
	 */

/* Empty structures do not have zero size in C++, which leads clang to
 * complain that the structure has different sizes in C and C++. Since
 * these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	} __rte_aligned(16);
#endif
} __rte_aligned(16);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
/**< Ordered scheduling: events of a flow may be processed in parallel on
 * multiple ports, but the original enqueue order is restored when the
 * events are enqueued to the next queue.
 */
#define RTE_SCHED_TYPE_ATOMIC 1
/**< Atomic scheduling: events of a given flow are scheduled to only one
 * port at a time, providing atomicity and preserving flow ordering.
 */
#define RTE_SCHED_TYPE_PARALLEL 2
/**< Parallel scheduling: events are scheduled to ports with no flow
 * ordering guarantees.
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
/**< The event is generated by the ethdev subsystem. */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event is generated by the cryptodev subsystem. */
#define RTE_EVENT_TYPE_TIMER 0x2
/**< The event is generated by an event timer adapter. */
#define RTE_EVENT_TYPE_CPU 0x3
/**< The event is generated by the CPU (application). */
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/**< The event is generated by an ethdev Rx adapter. */
#define RTE_EVENT_TYPE_VECTOR 0x8
/**< Marks the event as a vector; OR'ed with one of the types above to
 * form a vector event type.
 */

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10
/**< Maximum number of event types. */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
/**< Inject a new event into the event device. */
#define RTE_EVENT_OP_FORWARD 1
/**< Forward a previously dequeued event, possibly to a different queue,
 * flow or schedule type.
 */
#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow or atomic context held by a previously dequeued
 * event without enqueueing a new one.
 */

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;
		/* Event attributes for dequeue or enqueue operation */
		struct {
			uint32_t flow_id:20;
			/**< Target flow identifier for the enqueue and
			 * dequeue operation.
			 */
			uint32_t sub_event_type:8;
			/**< Sub-event type based on the event source. */
			uint32_t event_type:4;
			/**< Event type to classify the event source
			 * (RTE_EVENT_TYPE_*).
			 */
			uint8_t op:2;
			/**< The type of event enqueue operation
			 * (RTE_EVENT_OP_*).
			 */
			uint8_t rsvd:4;
			/**< Reserved for future use. */
			uint8_t sched_type:2;
			/**< Scheduler synchronization type
			 * (RTE_SCHED_TYPE_*).
			 */
			uint8_t queue_id;
			/**< Target event queue identifier. */
			uint8_t priority;
			/**< Event priority relative to other events in the
			 * event queue.
			 */
			uint8_t impl_opaque;
			/**< Implementation-specific opaque value; must be
			 * preserved between dequeue and enqueue of the same
			 * event.
			 */
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;
		/**< Opaque 64-bit value. */
		void *event_ptr;
		/**< Opaque event pointer. */
		struct rte_mbuf *mbuf;
		/**< mbuf pointer, if the event is associated with an mbuf. */
		struct rte_event_vector *vec;
		/**< Event vector pointer. */
	};
};
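
/*
 * Example: a minimal sketch of building a CPU-originated event around an
 * application object. "obj" stands in for an application data pointer.
 *
 * @code
 *	struct rte_event ev = {
 *		.op = RTE_EVENT_OP_NEW,
 *		.queue_id = 0,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.event_type = RTE_EVENT_TYPE_CPU,
 *		.flow_id = 42,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.event_ptr = obj,
 *	};
 * @endcode
 */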

/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< The ethdev can deliver packets to the event device through an
 * internal event port, without a service core.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
/**< The adapter supports multiple event queues per ethdev; every ethdev
 * Rx queue can be connected to its own event queue.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
/**< The application can override the adapter-generated flow ID in the
 * event.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
/**< The adapter supports event vectorization per ethdev. */

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

/* Event timer adapter capability bitmap flags */
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
/**< The adapter can enqueue timer events through an internal event port. */
#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
/**< The adapter supports periodic timer mode. */

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
/**< The HW can generate RTE_EVENT_OP_NEW events through an internal
 * event port.
 */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
/**< The HW can generate RTE_EVENT_OP_FORWARD events through an internal
 * event port.
 */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
/**< A cryptodev queue pair can be bound to an event queue. */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
/**< Event response information can be stored in and retrieved from the
 * crypto session private data.
 */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
/**< The adapter supports event vectorization per cryptodev. */

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
/**< The ethdev can transmit packets received from the event device
 * through an internal event port.
 */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
/**< The adapter supports event vectorization per ethdev. */

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);
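
/*
 * Example: a minimal sketch of converting a 100 us dequeue timeout into
 * device ticks for use with rte_event_dequeue_burst().
 *
 * @code
 *	uint64_t timeout_ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000,
 *			&timeout_ticks) < 0)
 *		timeout_ticks = 0; // device has no timeout support
 * @endcode
 */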

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);
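
/*
 * Example: a minimal sketch of linking port 0 to queues 0 and 1. A NULL
 * priorities array requests the normal service priority for each link.
 *
 * @code
 *	const uint8_t queues[] = {0, 1};
 *
 *	if (rte_event_port_link(dev_id, 0, queues, NULL, 2) != 2)
 *		rte_exit(EXIT_FAILURE, "port link failed\n");
 * @endcode
 */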

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
/**< Maximum name length of an extended statistic. */

/* Selects the component of the eventdev the statistics refer to */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

/* A name-key lookup element for extended statistics */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);
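
/*
 * Example: a minimal sketch of dumping all device-level xstats. Calling
 * the names_get function with size 0 returns the number of available
 * statistics; error handling and free() are elided.
 *
 * @code
 *	struct rte_event_dev_xstats_name *names;
 *	uint64_t *ids, *values;
 *	int i, n;
 *
 *	n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	names = malloc(n * sizeof(*names));
 *	ids = malloc(n * sizeof(*ids));
 *	values = malloc(n * sizeof(*values));
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *			0, names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *			ids, values, n);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 * @endcode
 */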

int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);
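
/*
 * Example: a minimal sketch of creating a pool of event vectors, each
 * able to carry up to 64 mbuf pointers. The pool name and sizes are
 * illustrative.
 *
 * @code
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("ev_vec_pool", 8192, 0, 64,
 *			rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "vector pool creation failed\n");
 * @endcode
 */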

#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			  const struct rte_event ev[], uint16_t nb_events,
			  const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Use the non-burst routine when nb_events is 1; when the
	 * application passes a compile-time constant, this costs nothing.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}

static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			    const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
				const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}
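
/*
 * Example: a minimal sketch of a producer loop injecting new events on
 * port "port_id". "make_events()" and "BURST_SIZE" are hypothetical
 * application names.
 *
 * @code
 *	struct rte_event ev[BURST_SIZE];
 *	uint16_t nb, sent;
 *
 *	nb = make_events(ev, BURST_SIZE); // fills events with RTE_EVENT_OP_NEW
 *	sent = 0;
 *	while (sent < nb)
 *		sent += rte_event_enqueue_new_burst(dev_id, port_id,
 *				ev + sent, nb - sent);
 * @endcode
 */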

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Use the non-burst routine when nb_events is 1; when the
	 * application passes a compile-time constant, this costs nothing.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
				timeout_ticks);
}
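
/*
 * Example: a minimal sketch of a worker loop that dequeues a burst,
 * processes each event and forwards it to the next queue. "process()",
 * "NEXT_QUEUE", "BURST_SIZE" and "done" are hypothetical application
 * names.
 *
 * @code
 *	while (!done) {
 *		struct rte_event ev[BURST_SIZE];
 *		uint16_t i, nb;
 *
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *				BURST_SIZE, timeout_ticks);
 *		for (i = 0; i < nb; i++) {
 *			process(&ev[i]);
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *			ev[i].queue_id = NEXT_QUEUE;
 *		}
 *		rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 * @endcode
 */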

#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
/**< Force an immediate flush of any events buffered in the port. */

__rte_experimental
static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */