DPDK 23.11.1
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255

#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255

#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
    const char *driver_name;
    struct rte_device *dev;
    uint32_t min_dequeue_timeout_ns;
    uint32_t max_dequeue_timeout_ns;
    uint32_t dequeue_timeout_ns;
    uint8_t max_event_queues;
    uint32_t max_event_queue_flows;
    uint8_t max_event_queue_priority_levels;
    uint8_t max_event_priority_levels;
    uint8_t max_event_ports;
    uint8_t max_event_port_dequeue_depth;
    uint32_t max_event_port_enqueue_depth;
    uint8_t max_event_port_links;
    int32_t max_num_events;
    uint32_t event_dev_cap;
    uint8_t max_single_link_event_port_queue_pairs;
    uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

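As a quick orientation, here is a minimal sketch of device discovery and capability probing using the functions above. The vdev name "event_sw0" is an assumed example, not something this header defines.

#include <stdio.h>
#include <rte_eventdev.h>

/* Minimal sketch: look up an event device by name and test one
 * capability bit. "event_sw0" is an assumed example device name
 * (e.g. created with --vdev=event_sw0). */
static int
probe_eventdev(void)
{
    struct rte_event_dev_info info;
    int dev_id = rte_event_dev_get_dev_id("event_sw0");

    if (dev_id < 0)
        return dev_id; /* no such device */
    if (rte_event_dev_info_get(dev_id, &info) < 0)
        return -1;

    if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
        printf("device %d (socket %d) supports per-queue QoS\n",
               dev_id, rte_event_dev_socket_id(dev_id));
    return 0;
}
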
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
        uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

struct rte_event_dev_config {
    uint32_t dequeue_timeout_ns;
    int32_t nb_events_limit;
    uint8_t nb_event_queues;
    uint8_t nb_event_ports;
    uint32_t nb_event_queue_flows;
    uint32_t nb_event_port_dequeue_depth;
    uint32_t nb_event_port_enqueue_depth;
    uint32_t event_dev_cfg;
    uint8_t nb_single_link_event_port_queues;
};

int
rte_event_dev_configure(uint8_t dev_id,
        const struct rte_event_dev_config *dev_conf);

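A hedged sketch of a typical configure call, sizing the device from its own advertised limits; the two-queue, two-port topology is an assumption for illustration.

#include <rte_eventdev.h>

/* Sketch: configure a device using the limits reported by
 * rte_event_dev_info_get(). The 2-queue/2-port topology is an
 * illustrative assumption. */
static int
configure_eventdev(uint8_t dev_id)
{
    struct rte_event_dev_info info;
    struct rte_event_dev_config cfg = {0};

    if (rte_event_dev_info_get(dev_id, &info) < 0)
        return -1;

    cfg.nb_event_queues = 2;
    cfg.nb_event_ports = 2;
    cfg.nb_events_limit = info.max_num_events;
    cfg.nb_event_queue_flows = info.max_event_queue_flows;
    cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
    cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
    cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

    return rte_event_dev_configure(dev_id, &cfg);
}
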
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
    uint32_t nb_atomic_flows;
    uint32_t nb_atomic_order_sequences;
    uint32_t event_queue_cfg;
    uint8_t schedule_type;
    uint8_t priority;
    uint8_t weight;
    uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
        struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
        const struct rte_event_queue_conf *queue_conf);

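For example, a sketch that starts from the driver's default queue configuration and overrides only what it needs; queue id 0 and atomic scheduling are illustrative choices.

#include <rte_eventdev.h>

/* Sketch: set up queue 0 as an atomic queue, starting from the
 * driver defaults. Queue id and schedule type are example choices. */
static int
setup_atomic_queue(uint8_t dev_id)
{
    struct rte_event_queue_conf qconf;

    if (rte_event_queue_default_conf_get(dev_id, 0, &qconf) < 0)
        return -1;

    qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
    qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

    return rte_event_queue_setup(dev_id, 0, &qconf);
}
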
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5

#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
        uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
        uint64_t attr_value);

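A one-call sketch of changing a queue attribute after the device has started; this only succeeds on devices advertising RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR.

#include <rte_eventdev.h>

/* Sketch: raise queue 0 to the highest priority at runtime. Requires
 * the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability. */
static int
boost_queue_priority(uint8_t dev_id)
{
    return rte_event_queue_attr_set(dev_id, 0,
            RTE_EVENT_QUEUE_ATTR_PRIORITY,
            RTE_EVENT_DEV_PRIORITY_HIGHEST);
}
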
/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)

#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)

#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)

#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)

struct rte_event_port_conf {
    int32_t new_event_threshold;
    uint16_t dequeue_depth;
    uint16_t enqueue_depth;
    uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
        struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
        const struct rte_event_port_conf *port_conf);

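A sketch of port setup following the same default-then-override pattern; the batch-size cap and the worker hint are illustrative, not required.

#include <rte_eventdev.h>

/* Sketch: set up port 0 from driver defaults, capping the dequeue
 * batch size (example value) and hinting the port serves a worker. */
static int
setup_worker_port(uint8_t dev_id)
{
    struct rte_event_port_conf pconf;

    if (rte_event_port_default_conf_get(dev_id, 0, &pconf) < 0)
        return -1;

    if (pconf.dequeue_depth > 32)
        pconf.dequeue_depth = 32;
    pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

    return rte_event_port_setup(dev_id, 0, &pconf);
}
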
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
        struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
        rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
        uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
        struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
        rte_eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);

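Tying the lifecycle calls together, a sketch of a graceful shutdown that registers a stop-flush callback to count the in-flight events discarded by rte_event_dev_stop(); the counting policy is an assumption.

#include <rte_common.h>
#include <rte_eventdev.h>

/* Sketch: count events flushed at stop time, then stop and close. */
static void
count_flushed(uint8_t dev_id, struct rte_event event, void *arg)
{
    uint64_t *flushed = arg;

    (*flushed)++;
    RTE_SET_USED(dev_id);
    RTE_SET_USED(event);
}

static uint64_t flushed_events;

static void
shutdown_eventdev(uint8_t dev_id)
{
    rte_event_dev_stop_flush_callback_register(dev_id, count_flushed,
            &flushed_events);
    rte_event_dev_stop(dev_id);
    rte_event_dev_close(dev_id);
}
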
struct rte_event_vector {
    uint16_t nb_elem;
    uint16_t elem_offset : 12;
    uint16_t rsvd : 3;
    uint16_t attr_valid : 1;
    union {
        /* Used by Rx/Tx adapter.
         * Indicates that all the elements in this vector belong to the
         * same port and queue pair when originating from Rx adapter,
         * valid only when event type is ETHDEV_VECTOR or
         * ETH_RX_ADAPTER_VECTOR.
         * Can also be used to indicate to the Tx adapter the destination
         * port and queue of the mbufs in the vector.
         */
        struct {
            uint16_t port;
            uint16_t queue;
        };
    };
    uint64_t impl_opaque;

/* Empty structures do not have zero size in C++, leading to compilation
 * errors with clang about the structure having different sizes in C and
 * C++. Since these are all zero-sized arrays, we can omit the "union"
 * wrapper for C++ builds, removing the warning.
 */
#ifndef __cplusplus
    union {
#endif
    struct rte_mbuf *mbufs[0];
    void *ptrs[0];
    uint64_t u64s[0];
#ifndef __cplusplus
    } __rte_aligned(16);
#endif

#ifndef __DOXYGEN__
} __rte_aligned(16);
#else
};
#endif

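A short sketch of consuming an mbuf vector, honouring elem_offset as the index of the first valid element; handle_packet() is a hypothetical application hook.

#include <rte_eventdev.h>

static void handle_packet(struct rte_mbuf *m); /* hypothetical hook */

/* Sketch: iterate the valid mbuf elements of an event vector. */
static inline void
process_mbuf_vector(struct rte_event_vector *vec)
{
    uint16_t i;

    for (i = vec->elem_offset; i < vec->elem_offset + vec->nb_elem; i++)
        handle_packet(vec->mbufs[i]);
}
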
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMER 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4

#define RTE_EVENT_TYPE_DMADEV 0x5

#define RTE_EVENT_TYPE_VECTOR 0x8

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)

#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)

#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
    /* WORD0 */
    union {
        uint64_t event;
        /* Event attributes for dequeue or enqueue operation */
        struct {
            uint32_t flow_id:20;
            uint32_t sub_event_type:8;
            uint32_t event_type:4;
            uint8_t op:2;
            uint8_t rsvd:4;
            uint8_t sched_type:2;
            uint8_t queue_id;
            uint8_t priority;
            uint8_t impl_opaque;
        };
    };
    /* WORD1 */
    union {
        uint64_t u64;
        void *event_ptr;
        struct rte_mbuf *mbuf;
        struct rte_event_vector *vec;
    };
};

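A sketch of populating a new CPU-originated event around an mbuf; the queue id and flow id are example values. Clearing WORD0 through the event field first is a common idiom so the reserved bits stay zero.

#include <rte_eventdev.h>

/* Sketch: build a new atomic event carrying an mbuf. Queue and flow
 * ids are example values. */
static inline void
make_new_event(struct rte_event *ev, struct rte_mbuf *m)
{
    ev->event = 0; /* clear all of WORD0, including rsvd bits */
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
    ev->event_type = RTE_EVENT_TYPE_CPU;
    ev->queue_id = 0;
    ev->flow_id = 0xbeef;
    ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
    ev->mbuf = m; /* WORD1 payload */
}
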
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
        uint32_t *caps);

#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
        uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
        uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
        uint64_t *timeout_ticks);

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
        const uint8_t queues[], const uint8_t priorities[],
        uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
        uint8_t queues[], uint16_t nb_unlinks);

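A sketch linking one port to two queues with distinct service priorities; the priorities act as per-link service hints, and devices without queue QoS support may ignore them.

#include <rte_eventdev.h>

/* Sketch: link port 0 to queues 0 and 1; expect both links to stick. */
static int
link_worker_port(uint8_t dev_id)
{
    const uint8_t queues[] = {0, 1};
    const uint8_t priorities[] = {
        RTE_EVENT_DEV_PRIORITY_HIGHEST,
        RTE_EVENT_DEV_PRIORITY_NORMAL,
    };
    int ret = rte_event_port_link(dev_id, 0, queues, priorities, 2);

    return (ret == 2) ? 0 : -1;
}
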
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
        const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
        uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
        uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
        uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
    RTE_EVENT_DEV_XSTATS_DEVICE,
    RTE_EVENT_DEV_XSTATS_PORT,
    RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
    char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
        enum rte_event_dev_xstats_mode mode,
        uint8_t queue_port_id,
        struct rte_event_dev_xstats_name *xstats_names,
        uint64_t *ids,
        unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
        enum rte_event_dev_xstats_mode mode,
        uint8_t queue_port_id,
        const uint64_t ids[],
        uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
        uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
        enum rte_event_dev_xstats_mode mode,
        int16_t queue_port_id,
        const uint64_t ids[],
        uint32_t nb_ids);

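A sketch pairing the xstats flow end to end: enumerate names and ids, fetch the values, print them. The fixed 64-entry cap is an assumption; production code would size the arrays from the first names_get() return.

#include <inttypes.h>
#include <stdio.h>
#include <rte_eventdev.h>

/* Sketch: print device-scope extended stats, capped at 64 entries. */
static void
print_dev_xstats(uint8_t dev_id)
{
    struct rte_event_dev_xstats_name names[64];
    uint64_t ids[64], values[64];
    int n, i;

    n = rte_event_dev_xstats_names_get(dev_id,
            RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 64);
    if (n <= 0)
        return;
    if (n > 64)
        n = 64; /* only the first 64 names/ids were filled in */

    n = rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
            0, ids, values, n);
    for (i = 0; i < n; i++)
        printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}
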
int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
        unsigned int cache_size, uint16_t nb_elem,
        int socket_id);

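A sketch of creating the mempool that backs event vectors; all sizes are example values.

#include <rte_eventdev.h>
#include <rte_mempool.h>

/* Sketch: a pool of vectors, each able to carry 32 elements. */
static struct rte_mempool *
create_vector_pool(int socket_id)
{
    return rte_event_vector_pool_create("evt_vec_pool",
            16 * 1024, /* vectors in the pool */
            128,       /* per-lcore cache */
            32,        /* elements per vector */
            socket_id);
}
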
#include <rte_eventdev_core.h>

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
        const struct rte_event ev[], uint16_t nb_events,
        const event_enqueue_burst_t fn)
{
    const struct rte_event_fp_ops *fp_ops;
    void *port;

    fp_ops = &rte_event_fp_ops[dev_id];
    port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    if (dev_id >= RTE_EVENT_MAX_DEVS ||
            port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
        rte_errno = EINVAL;
        return 0;
    }

    if (port == NULL) {
        rte_errno = EINVAL;
        return 0;
    }
#endif
    rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
    /*
     * Allow zero-cost invocation of the non-burst routine when the
     * application passes nb_events as a compile-time constant of one.
     */
    if (nb_events == 1)
        return (fp_ops->enqueue)(port, ev);
    else
        return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
        const struct rte_event ev[], uint16_t nb_events)
{
    const struct rte_event_fp_ops *fp_ops;

    fp_ops = &rte_event_fp_ops[dev_id];
    return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
            fp_ops->enqueue_burst);
}

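A sketch of draining a batch through rte_event_enqueue_burst(), retrying the unaccepted tail under back pressure; real code should also check rte_errno to distinguish back pressure from hard errors.

#include <rte_eventdev.h>

/* Sketch: keep enqueuing until the whole batch is accepted. */
static void
enqueue_all(uint8_t dev_id, uint8_t port_id,
        const struct rte_event *ev, uint16_t n)
{
    uint16_t sent = 0;

    while (sent < n)
        sent += rte_event_enqueue_burst(dev_id, port_id,
                ev + sent, n - sent);
}
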
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
        const struct rte_event ev[], uint16_t nb_events)
{
    const struct rte_event_fp_ops *fp_ops;

    fp_ops = &rte_event_fp_ops[dev_id];
    return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
            fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
        const struct rte_event ev[], uint16_t nb_events)
{
    const struct rte_event_fp_ops *fp_ops;

    fp_ops = &rte_event_fp_ops[dev_id];
    return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
            fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
        uint16_t nb_events, uint64_t timeout_ticks)
{
    const struct rte_event_fp_ops *fp_ops;
    void *port;

    fp_ops = &rte_event_fp_ops[dev_id];
    port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    if (dev_id >= RTE_EVENT_MAX_DEVS ||
            port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
        rte_errno = EINVAL;
        return 0;
    }

    if (port == NULL) {
        rte_errno = EINVAL;
        return 0;
    }
#endif
    rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
    /*
     * Allow zero-cost invocation of the non-burst routine when the
     * application passes nb_events as a compile-time constant of one.
     */
    if (nb_events == 1)
        return (fp_ops->dequeue)(port, ev, timeout_ticks);
    else
        return (fp_ops->dequeue_burst)(port, ev, nb_events,
                timeout_ticks);
}

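Putting dequeue and forward together, a sketch of a pipeline worker loop; process_event() is a hypothetical application hook, queue 1 is an assumed next stage, and partial-enqueue handling is elided for brevity.

#include <rte_common.h>
#include <rte_eventdev.h>

static void process_event(struct rte_event *ev); /* hypothetical hook */

/* Sketch: dequeue a burst, transform each event, forward to queue 1. */
static void
worker_loop(uint8_t dev_id, uint8_t port_id)
{
    struct rte_event ev[32];
    uint16_t i, nb;

    for (;;) {
        nb = rte_event_dequeue_burst(dev_id, port_id, ev,
                RTE_DIM(ev), 0 /* no wait */);
        for (i = 0; i < nb; i++) {
            process_event(&ev[i]);
            ev[i].op = RTE_EVENT_OP_FORWARD;
            ev[i].queue_id = 1;
        }
        if (nb > 0)
            rte_event_enqueue_forward_burst(dev_id, port_id, ev, nb);
    }
}
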
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
    const struct rte_event_fp_ops *fp_ops;
    void *port;

    fp_ops = &rte_event_fp_ops[dev_id];
    port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    if (dev_id >= RTE_EVENT_MAX_DEVS ||
            port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
        return -EINVAL;

    if (port == NULL)
        return -EINVAL;

    if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
        return -EINVAL;
#endif
    rte_eventdev_trace_maintain(dev_id, port_id, op);

    if (fp_ops->maintain != NULL)
        fp_ops->maintain(port, op);

    return 0;
}

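A sketch of keeping a non-maintenance-free device serviced while a port is otherwise idle; on devices advertising RTE_EVENT_DEV_CAP_MAINTENANCE_FREE the call is unnecessary.

#include <rte_eventdev.h>

/* Sketch: poll for one event, calling maintain on every empty poll. */
static void
idle_poll(uint8_t dev_id, uint8_t port_id)
{
    struct rte_event ev;

    while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
        rte_event_maintain(dev_id, port_id, 0);
}
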
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
    const struct rte_event_fp_ops *fp_ops;
    void *port;

    fp_ops = &rte_event_fp_ops[dev_id];
    port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
    if (dev_id >= RTE_EVENT_MAX_DEVS ||
            port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
        return -EINVAL;

    if (port == NULL)
        return -EINVAL;

    if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
        return -EINVAL;
#endif
    rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

    return fp_ops->profile_switch(port, profile_id);
}

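Finally, a sketch that pre-populates two link profiles and switches between them from the datapath; valid only on devices advertising RTE_EVENT_DEV_CAP_PROFILE_LINK. Passing NULL priorities to request normal priority mirrors rte_event_port_link(), and the queue ids are example values.

#include <rte_eventdev.h>

/* Sketch: profile 0 serves queue 0, profile 1 serves queue 1. */
static int
setup_and_switch_profiles(uint8_t dev_id, uint8_t port_id)
{
    const uint8_t q0[] = {0}, q1[] = {1};

    if (rte_event_port_profile_links_set(dev_id, port_id,
            q0, NULL, 1, 0) != 1)
        return -1;
    if (rte_event_port_profile_links_set(dev_id, port_id,
            q1, NULL, 1, 1) != 1)
        return -1;

    /* Later, from the worker: make profile 1 the active link set. */
    return rte_event_port_profile_switch(dev_id, port_id, 1);
}
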
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */