DPDK 24.03.0-rc1
rte_eventdev.h

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)

#define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13)

#define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14)

#define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255

#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255

#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
        const char *driver_name;        /* Event driver name. */
        struct rte_device *dev;         /* Device information. */
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        uint32_t dequeue_timeout_ns;
        uint8_t max_event_queues;
        uint32_t max_event_queue_flows;
        uint8_t max_event_queue_priority_levels;
        uint8_t max_event_priority_levels;
        uint8_t max_event_ports;
        uint8_t max_event_port_dequeue_depth;
        uint32_t max_event_port_enqueue_depth;
        uint8_t max_event_port_links;
        int32_t max_num_events;
        uint32_t event_dev_cap;
        uint8_t max_single_link_event_port_queue_pairs;
        uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

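/*
 * Usage sketch (illustrative, not part of the upstream header): enumerate the
 * event devices present and query their capabilities before configuration.
 *
 *	uint8_t nb_devs = rte_event_dev_count();
 *	for (unsigned int i = 0; i < nb_devs; i++) {
 *		struct rte_event_dev_info info;
 *
 *		if (rte_event_dev_info_get(i, &info) < 0)
 *			continue;
 *		printf("dev %u: %s on socket %d, max %u queues, burst %s\n",
 *		       i, info.driver_name, rte_event_dev_socket_id(i),
 *		       (unsigned int)info.max_event_queues,
 *		       (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
 *		       "yes" : "no");
 *	}
 */
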
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

struct rte_event_dev_config {
        uint32_t dequeue_timeout_ns;
        int32_t nb_events_limit;
        uint8_t nb_event_queues;
        uint8_t nb_event_ports;
        uint32_t nb_event_queue_flows;
        uint32_t nb_event_port_dequeue_depth;
        uint32_t nb_event_port_enqueue_depth;
        uint32_t event_dev_cfg;
        uint8_t nb_single_link_event_port_queues;
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

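/*
 * Configuration sketch (illustrative, not from the upstream header): derive a
 * minimal single-queue, single-port configuration from the device info. The
 * chosen sizes simply reuse the advertised maxima; real applications size
 * these from their own requirements.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to configure eventdev\n");
 */
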
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
        uint32_t nb_atomic_flows;
        uint32_t nb_atomic_order_sequences;
        uint32_t event_queue_cfg;
        uint8_t schedule_type;
        uint8_t priority;
        uint8_t weight;
        uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

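/*
 * Queue setup sketch (illustrative): start from the driver defaults, then
 * request an atomic queue at normal priority. queue_id 0 is assumed to be
 * within the configured nb_event_queues.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "queue setup failed\n");
 */
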
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5

#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);

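/*
 * Runtime attribute sketch (illustrative): raising a queue's priority while
 * the device is running, which is only valid when the device reports
 * RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR. dev_id and queue_id are assumed to be
 * in scope.
 *
 *	uint32_t prio;
 *
 *	rte_event_queue_attr_get(dev_id, queue_id,
 *				 RTE_EVENT_QUEUE_ATTR_PRIORITY, &prio);
 *	if (prio != RTE_EVENT_DEV_PRIORITY_HIGHEST)
 *		rte_event_queue_attr_set(dev_id, queue_id,
 *					 RTE_EVENT_QUEUE_ATTR_PRIORITY,
 *					 RTE_EVENT_DEV_PRIORITY_HIGHEST);
 */
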
/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)

#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)

#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)

#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)

struct rte_event_port_conf {
        int32_t new_event_threshold;
        uint16_t dequeue_depth;
        uint16_t enqueue_depth;
        uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

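/*
 * Port setup sketch (illustrative): use the driver defaults and lower the
 * new-event threshold so a producer port cannot flood the device. The
 * threshold value of 1024 is hypothetical.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *	pconf.new_event_threshold = 1024;
 *	if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
 *		rte_exit(EXIT_FAILURE, "port setup failed\n");
 */
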
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

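/*
 * Stop-flush sketch (illustrative): a callback that frees the mbuf carried by
 * each event still inside the device when rte_event_dev_stop() is called,
 * assuming the application's events carry mbufs. Passing NULL userdata is
 * part of this example, not a requirement.
 *
 *	static void
 *	stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
 *	{
 *		RTE_SET_USED(dev_id);
 *		RTE_SET_USED(arg);
 *		rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, stop_flush_cb, NULL);
 */
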
int
rte_event_dev_close(uint8_t dev_id);

struct rte_event_vector {
        uint16_t nb_elem;
        uint16_t elem_offset : 12;
        uint16_t rsvd : 3;
        uint16_t attr_valid : 1;
        union {
                /* Used by Rx/Tx adapter.
                 * Indicates that all the elements in this vector belong to the
                 * same port and queue pair when originating from Rx adapter,
                 * valid only when event type is ETHDEV_VECTOR or
                 * ETH_RX_ADAPTER_VECTOR.
                 * Can also be used to indicate to the Tx adapter the
                 * destination port and queue of the mbufs in the vector.
                 */
                struct {
                        uint16_t port;  /* Ethernet device port id. */
                        uint16_t queue; /* Ethernet device queue id. */
                };
        };
        uint64_t impl_opaque;

/* empty structures do not have zero size in C++ leading to compilation errors
 * with clang about structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
        union {
#endif
                struct rte_mbuf *mbufs[0];
                void *ptrs[0];
                uint64_t u64s[0];
#ifndef __cplusplus
        } __rte_aligned(16);
#endif
} __rte_aligned(16);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMER 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4

#define RTE_EVENT_TYPE_DMADEV 0x5

#define RTE_EVENT_TYPE_VECTOR 0x8

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
		(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)

#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
		(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)

#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
		(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
        /* WORD0 */
        union {
                uint64_t event;
                /* Event attributes for dequeue or enqueue operation */
                struct {
                        uint32_t flow_id:20;
                        uint32_t sub_event_type:8;
                        uint32_t event_type:4;
                        uint8_t op:2;
                        uint8_t rsvd:4;
                        uint8_t sched_type:2;
                        uint8_t queue_id;
                        uint8_t priority;
                        uint8_t impl_opaque;
                };
        };
        /* WORD1 */
        union {
                uint64_t u64;
                void *event_ptr;
                struct rte_mbuf *mbuf;
                struct rte_event_vector *vec;
        };
};

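/*
 * Event construction sketch (illustrative): injecting a CPU-originated mbuf
 * event into queue 0 as a new event. m is a hypothetical struct rte_mbuf *;
 * the RSS hash seeds flow_id and is truncated to its 20-bit field.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.queue_id = 0;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.flow_id = m->hash.rss;
 *	ev.mbuf = m;
 */
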
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

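/*
 * Capability-check sketch (illustrative): decide whether an Rx adapter for a
 * given ethdev port will need a service core, which is the case when the
 * INTERNAL_PORT capability is absent.
 *
 *	uint32_t caps = 0;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *	    !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 *		printf("Rx adapter will be driven by a service core\n");
 */
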
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flag */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* DMA adapter capability bitmap flag */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

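/*
 * Timeout conversion sketch (illustrative): convert a 100 us wait into the
 * device-specific tick value that rte_event_dequeue_burst() expects. The
 * per-call timeout is honoured only when the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT; on conversion failure this example
 * falls back to a non-blocking dequeue.
 *
 *	uint64_t ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 */
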
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

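/*
 * Linking sketch (illustrative): subscribe worker port 0 to queues 0 and 1 at
 * normal service priority. Passing NULL for both arrays instead links the
 * port to every configured queue.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_NORMAL,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, 0, queues, prios, 2) != 2)
 *		rte_exit(EXIT_FAILURE, "failed to link both queues\n");
 */
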
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
        RTE_EVENT_DEV_XSTATS_DEVICE,
        RTE_EVENT_DEV_XSTATS_PORT,
        RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
        char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);

int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);

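/*
 * Vector pool sketch (illustrative): create a pool of 4096 event vectors,
 * each able to carry up to 64 mbuf pointers, for use by an Rx adapter
 * configured for vectorization. The pool name and sizes are hypothetical.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 4096, 64,
 *						64, rte_socket_id());
 *	if (vec_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create event vector pool\n");
 */
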
#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
	/*
	 * Allow zero-cost invocation of the non-burst routine if the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->enqueue)(port, ev);
	else
		return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}

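/*
 * Enqueue sketch (illustrative): a producer retrying until a whole burst is
 * accepted. Under back-pressure the device may accept fewer events than
 * requested, so the unsent tail is resubmitted. events and nb_events are
 * assumed to be prepared by the caller.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_events)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *						&events[sent],
 *						nb_events - sent);
 */
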
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
	/*
	 * Allow zero-cost invocation of the non-burst routine if the
	 * application passes nb_events as a compile-time constant of one.
	 */
	if (nb_events == 1)
		return (fp_ops->dequeue)(port, ev, timeout_ticks);
	else
		return (fp_ops->dequeue_burst)(port, ev, nb_events,
				timeout_ticks);
}

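/*
 * Worker loop sketch (illustrative): the canonical dequeue/process/forward
 * cycle of an eventdev worker core. BURST_SIZE, process_event(), force_quit
 * and next_queue_id are hypothetical; forwarded events keep their scheduling
 * context by using RTE_EVENT_OP_FORWARD.
 *
 *	struct rte_event evs[BURST_SIZE];
 *
 *	while (!force_quit) {
 *		uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs,
 *						     BURST_SIZE, timeout_ticks);
 *		for (uint16_t i = 0; i < n; i++) {
 *			process_event(&evs[i]);
 *			evs[i].op = RTE_EVENT_OP_FORWARD;
 *			evs[i].queue_id = next_queue_id;
 *		}
 *		if (n > 0)
 *			rte_event_enqueue_forward_burst(dev_id, port_id,
 *							evs, n);
 *	}
 */
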
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

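/*
 * Maintenance sketch (illustrative): on devices that do not report
 * RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, a port that goes idle must still be
 * maintained periodically so that buffered events are flushed and scheduling
 * progresses. force_quit, evs, BURST_SIZE and handle_events() are
 * hypothetical.
 *
 *	while (!force_quit) {
 *		uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs,
 *						     BURST_SIZE, 0);
 *		if (n == 0)
 *			rte_event_maintain(dev_id, port_id, 0);
 *		else
 *			handle_events(evs, n);
 *	}
 */
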
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */