DPDK 24.03.0
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)

#define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13)

#define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14)

#define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255

#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255

#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
	const char *driver_name;
	struct rte_device *dev;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	uint32_t dequeue_timeout_ns;
	uint8_t max_event_queues;
	uint32_t max_event_queue_flows;
	uint8_t max_event_queue_priority_levels;
	uint8_t max_event_priority_levels;
	uint8_t max_event_ports;
	uint8_t max_event_port_dequeue_depth;
	uint32_t max_event_port_enqueue_depth;
	uint8_t max_event_port_links;
	int32_t max_num_events;
	uint32_t event_dev_cap;
	uint8_t max_single_link_event_port_queue_pairs;
	uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

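/*
 * Usage sketch (illustrative, not part of this header): enumerate the
 * available event devices and print a few of their limits. Assumes the
 * EAL has been initialized and <stdio.h> is included.
 */
static void
list_event_devices(void)
{
	uint8_t nb_devs = rte_event_dev_count();

	for (uint8_t i = 0; i < nb_devs; i++) {
		struct rte_event_dev_info info;

		if (rte_event_dev_info_get(i, &info) < 0)
			continue;

		printf("eventdev %u: driver %s, socket %d, max queues %u, max ports %u\n",
		       i, info.driver_name, rte_event_dev_socket_id(i),
		       info.max_event_queues, info.max_event_ports);
	}
}
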
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	int32_t nb_events_limit;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t nb_event_queue_flows;
	uint32_t nb_event_port_dequeue_depth;
	uint32_t nb_event_port_enqueue_depth;
	uint32_t event_dev_cfg;
	uint8_t nb_single_link_event_port_queues;
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

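/*
 * Usage sketch (illustrative, not part of this header): configure a device
 * with one queue and one port, deriving the remaining limits from the
 * reported device info.
 */
static int
configure_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config = {0};

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;

	config.nb_event_queues = 1;
	config.nb_event_ports = 1;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &config);
}
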
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	uint32_t nb_atomic_order_sequences;
	uint32_t event_queue_cfg;
	uint8_t schedule_type;
	uint8_t priority;
	uint8_t weight;
	uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

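/*
 * Usage sketch (illustrative, not part of this header): set up an atomic
 * queue starting from the driver's default queue configuration.
 */
static int
setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf conf;

	if (rte_event_queue_default_conf_get(dev_id, queue_id, &conf) < 0)
		return -1;

	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &conf);
}
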
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5

#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)

#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)

#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)

#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)

struct rte_event_port_conf {
	int32_t new_event_threshold;
	uint16_t dequeue_depth;
	uint16_t enqueue_depth;
	uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

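/*
 * Usage sketch (illustrative, not part of this header): set up a worker
 * port from the driver defaults; the cap of 16 on the dequeue depth is an
 * arbitrary assumption for a latency-sensitive worker.
 */
static int
setup_worker_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;

	if (rte_event_port_default_conf_get(dev_id, port_id, &conf) < 0)
		return -1;

	if (conf.dequeue_depth > 16)
		conf.dequeue_depth = 16;

	return rte_event_port_setup(dev_id, port_id, &conf);
}
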
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);

struct __rte_aligned(16) rte_event_vector {
	uint16_t nb_elem;
	uint16_t elem_offset : 12;
	uint16_t rsvd : 3;
	uint16_t attr_valid : 1;
	union {
		/* Used by the Rx/Tx adapters.
		 * When the vector originates from the Rx adapter, indicates
		 * that all elements belong to the same port and queue pair;
		 * valid only when the event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to tell the Tx adapter the destination
		 * port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			uint16_t queue;
		};
	};
	uint64_t impl_opaque;

/* Empty structures do not have zero size in C++, leading to compilation
 * errors with clang about the structure having different sizes in C and
 * C++. Since these are all zero-sized arrays, we can omit the "union"
 * wrapper for C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union __rte_aligned(16) {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	};
#endif
};

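/*
 * Usage sketch (illustrative, not part of this header): walk the mbufs
 * carried by an event vector. Valid elements start at elem_offset;
 * rte_pktmbuf_free() requires including <rte_mbuf.h>.
 */
static void
drop_event_vector(struct rte_event_vector *vec)
{
	struct rte_mbuf **mbufs = &vec->mbufs[vec->elem_offset];

	for (uint16_t i = 0; i < vec->nb_elem; i++)
		rte_pktmbuf_free(mbufs[i]);

	vec->nb_elem = 0;
}
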
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMER 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4

#define RTE_EVENT_TYPE_DMADEV 0x5

#define RTE_EVENT_TYPE_VECTOR 0x8

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)

#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)

#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;
		struct {
			uint32_t flow_id:20;
			uint32_t sub_event_type:8;
			uint32_t event_type:4;
			uint8_t op:2;
			uint8_t rsvd:4;
			uint8_t sched_type:2;
			uint8_t queue_id;
			uint8_t priority;
			uint8_t impl_opaque;
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;
		void *event_ptr;
		struct rte_mbuf *mbuf;
		struct rte_event_vector *vec;
	};
};

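/*
 * Usage sketch (illustrative, not part of this header): populate a new
 * event that injects an application object into an atomic queue. The
 * flow_id argument is an application-chosen key (only the low 20 bits
 * are stored).
 */
static void
make_new_event(struct rte_event *ev, void *obj, uint8_t queue_id,
	       uint32_t flow_id)
{
	ev->event = 0; /* clear WORD0, including rsvd and impl_opaque */
	ev->op = RTE_EVENT_OP_NEW;
	ev->queue_id = queue_id;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->flow_id = flow_id;
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev->event_ptr = obj;
}
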
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

/* Timer adapter capability bitmap flags */
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

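/*
 * Usage sketch (illustrative, not part of this header): link a single
 * queue to a port at normal service priority. Passing NULL for both
 * arrays instead links the port to all configured queues.
 */
static int
link_worker_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	uint8_t queues[] = { queue_id };
	uint8_t priorities[] = { RTE_EVENT_DEV_PRIORITY_NORMAL };

	/* Returns the number of links actually established. */
	return rte_event_port_link(dev_id, port_id, queues, priorities, 1) == 1
		? 0 : -1;
}
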
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

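/*
 * Usage sketch (illustrative, not part of this header): purely software
 * event devices run their scheduler as a DPDK service. If the device
 * reports a service id, map it to a service lcore before calling
 * rte_event_dev_start(). Assumes <rte_service.h> is included and a
 * service lcore has already been added.
 */
static void
map_scheduler_service(uint8_t dev_id, uint32_t service_lcore)
{
	uint32_t service_id;

	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
		rte_service_map_lcore_set(service_id, service_lcore, 1);
		rte_service_runstate_set(service_id, 1);
	}
}
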
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);

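/*
 * Usage sketch (illustrative, not part of this header): print all
 * device-scope extended stats. The fixed capacity of 512 entries is an
 * arbitrary assumption; <stdio.h> and <inttypes.h> are assumed included.
 */
static void
dump_device_xstats(uint8_t dev_id)
{
	struct rte_event_dev_xstats_name names[512];
	uint64_t ids[512];
	uint64_t values[512];
	int n;

	n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, names, ids, 512);
	if (n <= 0 || n > 512)
		return;

	if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, ids, values, n) != n)
		return;

	for (int i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}
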
int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);

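/*
 * Usage sketch (illustrative, not part of this header): create a pool of
 * event vectors, each able to carry up to 64 pointers. The pool name,
 * pool size, and cache size are arbitrary assumptions.
 */
static struct rte_mempool *
create_vector_pool(int socket_id)
{
	return rte_event_vector_pool_create("evt_vec_pool",
			16384,	/* number of vectors in the pool */
			64,	/* per-lcore cache size */
			64,	/* max elements per vector */
			socket_id);
}
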
#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Allow zero-cost invocation of the non-burst routine when the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}
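
/*
 * Usage sketch (illustrative, not part of this header): retry until a
 * whole burst is accepted; the device may take fewer events than
 * requested when its internal queues are full. A production loop would
 * also check rte_errno to distinguish back-pressure from hard errors.
 */
static void
enqueue_all(uint8_t dev_id, uint8_t port_id,
	    const struct rte_event *ev, uint16_t nb_events)
{
	uint16_t sent = 0;

	while (sent < nb_events)
		sent += rte_event_enqueue_burst(dev_id, port_id,
				ev + sent, nb_events - sent);
}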

static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero-cost invocation of the non-burst routine when the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
				timeout_ticks);
}

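/*
 * Usage sketch (illustrative, not part of this header): a minimal worker
 * loop that forwards every dequeued event to a next-stage queue;
 * process_event() stands in for application logic, and handling of an
 * enqueue shortfall is elided for brevity.
 */
static void
worker_loop(uint8_t dev_id, uint8_t port_id, uint8_t next_queue)
{
	struct rte_event ev[32];

	for (;;) {
		uint16_t n = rte_event_dequeue_burst(dev_id, port_id, ev,
				RTE_DIM(ev), 0 /* default timeout */);

		for (uint16_t i = 0; i < n; i++) {
			/* process_event(&ev[i]); */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = next_queue;
		}
		if (n > 0)
			rte_event_enqueue_burst(dev_id, port_id, ev, n);
	}
}
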
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

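/*
 * Usage sketch (illustrative, not part of this header): on devices
 * without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, a port that has gone idle
 * must still invoke rte_event_maintain() periodically from its polling
 * loop so the device can make forward progress.
 */
static void
idle_port_tick(uint8_t dev_id, uint8_t port_id)
{
	/* op = 0 requests regular maintenance; RTE_EVENT_DEV_MAINT_OP_FLUSH
	 * would additionally flush any events buffered on the port.
	 */
	rte_event_maintain(dev_id, port_id, 0);
}
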
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */