DPDK  24.11.0-rc3
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)

#define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13)

#define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14)

#define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15)

#define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ (1ULL << 16)

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE (1ULL << 17)

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE (1ULL << 18)

#define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE (1ULL << 19)

#define RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT (1ULL << 20)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255

#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255

#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

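/*
 * Usage sketch: locate an event device and report its NUMA socket.
 * The vdev name "event_sw0" is a hypothetical example; substitute the
 * name of whichever eventdev was probed or created at EAL init.
 */
static inline int
example_find_eventdev(void)
{
	int dev_id;

	if (rte_event_dev_count() == 0)
		return -1; /* no event device probed */

	dev_id = rte_event_dev_get_dev_id("event_sw0");
	if (dev_id < 0)
		return -1; /* no device registered under that name */

	/* NUMA node of the device; SOCKET_ID_ANY if it cannot be determined */
	(void)rte_event_dev_socket_id((uint8_t)dev_id);
	return dev_id;
}
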
struct rte_event_dev_info {
	const char *driver_name;
	struct rte_device *dev;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	uint32_t dequeue_timeout_ns;
	uint8_t max_event_queues;
	uint32_t max_event_queue_flows;
	uint8_t max_event_queue_priority_levels;
	uint8_t max_event_priority_levels;
	uint8_t max_event_ports;
	uint8_t max_event_port_dequeue_depth;
	uint32_t max_event_port_enqueue_depth;
	uint8_t max_event_port_links;
	int32_t max_num_events;
	uint32_t event_dev_cap;
	uint8_t max_single_link_event_port_queue_pairs;
	uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

/* Event device pre-schedule types */
enum rte_event_dev_preschedule_type {
	RTE_EVENT_PRESCHEDULE_NONE,
	RTE_EVENT_PRESCHEDULE,
	RTE_EVENT_PRESCHEDULE_ADAPTIVE,
};

struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	int32_t nb_events_limit;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t nb_event_queue_flows;
	uint32_t nb_event_port_dequeue_depth;
	uint32_t nb_event_port_enqueue_depth;
	uint32_t event_dev_cfg;
	uint8_t nb_single_link_event_port_queues;
	enum rte_event_dev_preschedule_type preschedule_type;
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

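/*
 * A minimal configuration sketch: query the device limits, then size a
 * single-queue, single-port configuration from them. Values and error
 * handling are illustrative only.
 */
static inline int
example_configure(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return -1;

	cfg.nb_event_queues = 1;
	cfg.nb_event_ports = 1;
	cfg.nb_events_limit = info.max_num_events;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

	/* must be called before any queue or port setup */
	return rte_event_dev_configure(dev_id, &cfg);
}
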
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	uint32_t nb_atomic_order_sequences;
	uint32_t event_queue_cfg;
	uint8_t schedule_type;
	uint8_t priority;
	uint8_t weight;
	uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

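/*
 * Sketch: create one atomic queue, starting from the driver defaults
 * and overriding only the scheduling type and priority. Must run after
 * rte_event_dev_configure() and before rte_event_dev_start().
 */
static inline int
example_setup_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;

	if (rte_event_queue_default_conf_get(dev_id, queue_id, &qconf) < 0)
		return -1;

	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}
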
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5

#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)

#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)

#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)

#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)

#define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ (1ULL << 5)

struct rte_event_port_conf {
	int32_t new_event_threshold;
	uint16_t dequeue_depth;
	uint16_t enqueue_depth;
	uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

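/*
 * Sketch: set up a worker port from the driver defaults, trimming the
 * burst depths. The depths chosen here are illustrative and must stay
 * within the limits given in rte_event_dev_config.
 */
static inline int
example_setup_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf pconf;

	if (rte_event_port_default_conf_get(dev_id, port_id, &pconf) < 0)
		return -1;

	pconf.dequeue_depth = 16;
	pconf.enqueue_depth = 16;

	return rte_event_port_setup(dev_id, port_id, &pconf);
}
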
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);

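/*
 * Sketch: count the events still buffered inside the device when it is
 * stopped. The callback runs once per flushed event; the counter is a
 * hypothetical user argument.
 */
static void
example_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(event);
	(*(uint64_t *)arg)++;
}

static inline void
example_shutdown(uint8_t dev_id, uint64_t *flushed)
{
	rte_event_dev_stop_flush_callback_register(dev_id,
			example_flush_cb, flushed);
	rte_event_dev_stop(dev_id);
	rte_event_dev_close(dev_id); /* only valid on a stopped device */
}
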
struct __rte_aligned(16) rte_event_vector {
	uint16_t nb_elem;
	uint16_t elem_offset : 12;
	uint16_t rsvd : 3;
	uint16_t attr_valid : 1;
	union {
		/* Used by Rx/Tx adapter.
		 * Indicates that all the elements in this vector belong to the
		 * same port and queue pair when originating from Rx adapter,
		 * valid only when event type is ETHDEV_VECTOR or
		 * ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the
		 * destination port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			uint16_t queue;
		};
	};
	uint64_t impl_opaque;

/* empty structures do not have zero size in C++ leading to compilation errors
 * with clang about structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union __rte_aligned(16) {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	};
#endif
};

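/*
 * Sketch: walk the mbufs carried by an event vector. Valid elements
 * start at elem_offset; nb_elem counts them from that point, matching
 * how the Tx adapter consumes vectors.
 */
static inline void
example_process_vector(struct rte_event_vector *vec)
{
	struct rte_mbuf **mbufs = &vec->mbufs[vec->elem_offset];
	uint16_t i;

	for (i = 0; i < vec->nb_elem; i++) {
		/* ... process mbufs[i] ... */
		(void)mbufs[i];
	}
}
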
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMER 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4

#define RTE_EVENT_TYPE_DMADEV 0x5

#define RTE_EVENT_TYPE_VECTOR 0x8

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)

#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)

#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;
		struct {
			uint32_t flow_id:20;
			uint32_t sub_event_type:8;
			uint32_t event_type:4;
			uint8_t op:2;
			uint8_t rsvd:4;
			uint8_t sched_type:2;
			uint8_t queue_id;
			uint8_t priority;
			uint8_t impl_opaque;
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;
		void *event_ptr;
		struct rte_mbuf *mbuf;
		struct rte_event_vector *vec;
	};
};

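/*
 * Sketch: stage a new CPU-generated event carrying an mbuf. WORD0 is
 * filled through the bitfield view; WORD1 takes the payload pointer.
 */
static inline void
example_make_event(struct rte_event *ev, struct rte_mbuf *m, uint8_t queue_id)
{
	ev->flow_id = 0;
	ev->sub_event_type = 0;
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->queue_id = queue_id;
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev->mbuf = m; /* WORD1 */
}
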
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

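/*
 * Sketch: check whether an ethdev/eventdev pair needs a service core
 * for Rx adapter operation; without the internal-port capability, the
 * adapter polls the NIC from a software service.
 */
static inline int
example_rx_adapter_needs_service(uint8_t dev_id, uint16_t eth_port_id)
{
	uint32_t caps = 0;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) < 0)
		return -1;

	return (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) ? 0 : 1;
}
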
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

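/*
 * Sketch: link a port to two queues with distinct service priorities,
 * then detach it from queue 1. Passing NULL priorities would assign
 * RTE_EVENT_DEV_PRIORITY_NORMAL to every link; devices may ignore the
 * priorities entirely.
 */
static inline int
example_link_port(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t queues[] = {0, 1};
	const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
				 RTE_EVENT_DEV_PRIORITY_NORMAL};
	uint8_t unlink_q = 1;

	if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
		return -1;

	return rte_event_port_unlink(dev_id, port_id, &unlink_q, 1);
}
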
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

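/*
 * Sketch: read one extended statistic by name. The statistic name
 * "dev_rx" is hypothetical; valid names are driver-defined and can be
 * enumerated with rte_event_dev_xstats_names_get().
 */
static inline uint64_t
example_read_xstat(uint8_t dev_id)
{
	uint64_t id; /* filled with the stat's id for later array reads */

	return rte_event_dev_xstats_by_name_get(dev_id, "dev_rx", &id);
}
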
int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);

int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);

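/*
 * Sketch: create a pool of event vectors, each able to carry up to 32
 * elements. The pool name and sizing values are illustrative.
 */
static inline struct rte_mempool *
example_create_vector_pool(int socket_id)
{
	return rte_event_vector_pool_create("ev_vector_pool",
			8192,	/* vectors in the pool */
			256,	/* per-lcore cache size */
			32,	/* elements per vector */
			socket_id);
}
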
#include <rte_eventdev_core.h>

#ifdef __cplusplus
extern "C" {
#endif

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);

	return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}

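/*
 * Sketch: enqueue a prepared burst, spinning on the remainder when the
 * device applies back-pressure and accepts only part of the burst. A
 * production loop would also check rte_errno to avoid spinning on a
 * permanent error such as an invalid event.
 */
static inline void
example_send_burst(uint8_t dev_id, uint8_t port_id,
		struct rte_event *ev, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_event_enqueue_burst(dev_id, port_id,
				ev + sent, n - sent);
}
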
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);

	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
}

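/*
 * Sketch of a worker loop: dequeue a burst, process it, and forward
 * each event to a hypothetical next-stage queue. A timeout_ticks of 0
 * is honored per-dequeue only if the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT.
 */
static inline void
example_worker(uint8_t dev_id, uint8_t port_id, uint8_t next_queue,
		const volatile int *stop)
{
	struct rte_event ev[32];
	uint16_t i, nb;

	while (!*stop) {
		nb = rte_event_dequeue_burst(dev_id, port_id, ev, 32, 0);
		for (i = 0; i < nb; i++) {
			/* ... process ev[i] ... */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = next_queue;
		}
		if (nb > 0) /* retry handling omitted for brevity */
			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
	}
}
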
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

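/*
 * Sketch: on devices without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, a
 * port that goes idle must still be maintained periodically; pass
 * RTE_EVENT_DEV_MAINT_OP_FLUSH instead of 0 to also push out events
 * buffered in the port.
 */
static inline void
example_idle_tick(uint8_t dev_id, uint8_t port_id)
{
	(void)rte_event_maintain(dev_id, port_id, 0);
}
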
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

__rte_experimental
static inline int
rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
		enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);

	return fp_ops->preschedule_modify(port, type);
}

__rte_experimental
static inline void
rte_event_port_preschedule(uint8_t dev_id, uint8_t port_id,
		enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return;
	if (port == NULL)
		return;
#endif
	rte_eventdev_trace_port_preschedule(dev_id, port_id, type);

	fp_ops->preschedule(port, type);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */