DPDK  18.05.1
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_errno.h>

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

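/*
 * Illustrative usage sketch (not part of the original header): enumerating
 * event devices and locating one by name. The vdev name "event_sw0" is a
 * hypothetical example. The returned socket id can guide NUMA-aware
 * allocation of the application's per-device resources.
 *
 *	uint8_t nb_devs = rte_event_dev_count();
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (nb_devs == 0 || dev_id < 0)
 *		rte_exit(EXIT_FAILURE, "no usable event device\n");
 *
 *	int socket = rte_event_dev_socket_id((uint8_t)dev_id);
 */
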
struct rte_event_dev_info {
	const char *driver_name;	/* Event driver name */
	struct rte_device *dev;	/* Device information */
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	uint32_t dequeue_timeout_ns;
	uint8_t max_event_queues;
	uint32_t max_event_queue_flows;
	uint8_t max_event_ports;
	int32_t max_num_events;
	uint32_t event_dev_cap;	/* Bitmap of RTE_EVENT_DEV_CAP_* flags */
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

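/*
 * Illustrative sketch (not part of the original header): querying run-time
 * device attributes. 'dev_id' is assumed to be a valid, configured device.
 *
 *	uint32_t nb_ports, nb_queues, started;
 *
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT, &nb_ports);
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &nb_queues);
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_STARTED, &started);
 */
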
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	int32_t nb_events_limit;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t nb_event_queue_flows;
	uint32_t event_dev_cfg;	/* Bitmap of RTE_EVENT_DEV_CFG_* flags */
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

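/*
 * Illustrative configuration sketch (not part of the original header): size
 * the device from its advertised limits and apply the configuration. The
 * nb_event_port_dequeue_depth/nb_event_port_enqueue_depth fields belong to
 * the full rte_event_dev_config definition even though they are not shown
 * in this trimmed listing; the queue/port/depth counts below are arbitrary
 * example values.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config config = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	config.nb_event_queues = 2;
 *	config.nb_event_ports = 4;
 *	config.nb_events_limit = info.max_num_events;
 *	config.nb_event_queue_flows = info.max_event_queue_flows;
 *	config.nb_event_port_dequeue_depth = 32;
 *	config.nb_event_port_enqueue_depth = 32;
 *	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *	if (rte_event_dev_configure(dev_id, &config) < 0)
 *		rte_exit(EXIT_FAILURE, "event dev configure failed\n");
 */
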

/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	uint32_t nb_atomic_order_sequences;
	uint32_t event_queue_cfg;	/* Bitmap of RTE_EVENT_QUEUE_CFG_* flags */
	uint8_t schedule_type;	/* One of RTE_SCHED_TYPE_* */
	uint8_t priority;	/* RTE_EVENT_DEV_PRIORITY_HIGHEST..LOWEST */
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

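/*
 * Illustrative queue setup sketch (not part of the original header): start
 * from the driver's default configuration and override the scheduling type.
 * 'dev_id' and 'queue_id' are assumed valid; queue priority is only
 * meaningful on devices advertising RTE_EVENT_DEV_CAP_QUEUE_QOS.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *	if (rte_event_queue_setup(dev_id, queue_id, &qconf) < 0)
 *		rte_exit(EXIT_FAILURE, "queue %u setup failed\n", queue_id);
 */
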
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event port specific APIs */

struct rte_event_port_conf {
	int32_t new_event_threshold;
	uint16_t dequeue_depth;
	uint16_t enqueue_depth;
	uint8_t disable_implicit_release;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

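/*
 * Illustrative port setup sketch (not part of the original header): each
 * lcore that enqueues or dequeues needs its own event port. The values below
 * are arbitrary; passing NULL for port_conf is documented to fall back to
 * the driver's default configuration.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(dev_id, port_id, &pconf);
 *	pconf.new_event_threshold = 1024;
 *	pconf.dequeue_depth = 16;
 *	pconf.enqueue_depth = 16;
 *
 *	if (rte_event_port_setup(dev_id, port_id, &pconf) < 0)
 *		rte_exit(EXIT_FAILURE, "port %u setup failed\n", port_id);
 */
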
#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
		void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);

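/*
 * Illustrative lifecycle sketch (not part of the original header): start the
 * device once queues and ports are set up and linked, then stop and close it
 * at teardown. The optional flush callback is invoked for each event still
 * in-flight when rte_event_dev_stop() is called; 'flush_cb' and 'userdata'
 * are hypothetical application symbols, and the callback assumes every event
 * carries an mbuf.
 *
 *	static void
 *	flush_cb(uint8_t id, struct rte_event event, void *arg)
 *	{
 *		rte_pktmbuf_free(event.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, userdata);
 *	if (rte_event_dev_start(dev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to start event device\n");
 *
 *	(... datapath runs ...)
 *
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */
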
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
#define RTE_SCHED_TYPE_ATOMIC 1
#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
#define RTE_EVENT_TYPE_TIMER 0x2
#define RTE_EVENT_TYPE_CPU 0x3
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
#define RTE_EVENT_OP_FORWARD 1
#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;
		/* Event attributes for dequeue or enqueue operation */
		struct {
			uint32_t flow_id:20;
			uint32_t sub_event_type:8;
			uint32_t event_type:4;
			uint8_t op:2;
			uint8_t rsvd:4;
			uint8_t sched_type:2;
			uint8_t queue_id;
			uint8_t priority;
			uint8_t impl_opaque;
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;	/* Opaque 64-bit value */
		void *event_ptr;	/* Opaque event pointer */
		struct rte_mbuf *mbuf;	/* mbuf pointer if the event carries an mbuf */
	};
};

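/*
 * Illustrative sketch (not part of the original header): composing a new
 * event around an mbuf before injecting it into the scheduler. 'flow',
 * 'target_queue' and 'm' are hypothetical application variables.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.flow_id = flow;
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.queue_id = target_queue;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;
 */
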
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
		uint32_t *caps);

#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

int __rte_experimental
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flag */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

int __rte_experimental
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

struct rte_eventdev_ops;
struct rte_eventdev;

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
typedef uint16_t (*event_enqueue_burst_t)(void *port,
		const struct rte_event ev[], uint16_t nb_events);
typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);
typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);

#define RTE_EVENTDEV_NAME_MAX_LEN (64)

struct rte_eventdev_data {
	int socket_id;
	/* Socket ID where memory is allocated */
	uint8_t dev_id;
	/* Device ID for this instance */
	uint8_t nb_queues;
	/* Number of event queues */
	uint8_t nb_ports;
	/* Number of event ports */
	void **ports;
	/* Array of pointers to ports */
	struct rte_event_port_conf *ports_cfg;
	/* Array of port configuration structures */
	struct rte_event_queue_conf *queues_cfg;
	/* Array of queue configuration structures */
	uint16_t *links_map;
	/* Memory to store queues to port connections */
	void *dev_private;
	/* PMD-specific private data */
	uint32_t event_dev_cap;
	/* Event device capabilities (RTE_EVENT_DEV_CAP_* flags) */
	struct rte_event_dev_config dev_conf;
	/* Configuration applied to the device */
	uint8_t service_inited;
	/* Service initialization state */
	uint32_t service_id;
	/* Service ID */
	void *dev_stop_flush_arg;
	/* User-provided argument for the device stop flush callback */

	RTE_STD_C11
	uint8_t dev_started : 1;
	/* Device state: STARTED(1)/STOPPED(0) */

	char name[RTE_EVENTDEV_NAME_MAX_LEN];
	/* Unique identifier name */
} __rte_cache_aligned;

struct rte_eventdev {
	event_enqueue_t enqueue;
	/* Pointer to PMD enqueue function */
	event_enqueue_burst_t enqueue_burst;
	/* Pointer to PMD enqueue burst function */
	event_enqueue_burst_t enqueue_new_burst;
	/* Pointer to PMD enqueue burst function (op new variant) */
	event_enqueue_burst_t enqueue_forward_burst;
	/* Pointer to PMD enqueue burst function (op forward variant) */
	event_dequeue_t dequeue;
	/* Pointer to PMD dequeue function */
	event_dequeue_burst_t dequeue_burst;
	/* Pointer to PMD dequeue burst function */

	struct rte_eventdev_data *data;
	/* Pointer to device data */
	struct rte_eventdev_ops *dev_ops;
	/* Functions exported by PMD */
	struct rte_device *dev;
	/* Device info supplied by probing */

	RTE_STD_C11
	uint8_t attached : 1;
	/* Flag indicating the device is attached */
} __rte_cache_aligned;

extern struct rte_eventdev *rte_eventdevs;

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	/*
	 * Allow zero cost non burst mode routine invocation if application
	 * requests nb_events as const one
	 */
	if (nb_events == 1)
		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	else
		return fn(dev->data->ports[port_id], ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_burst);
}

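/*
 * Illustrative sketch (not part of the original header): enqueuing a burst
 * and busy-retrying any events the port back-pressured; 'ev' and 'n' are
 * assumed to hold prepared events and their count. When every event in the
 * burst carries the same operation, the rte_event_enqueue_new_burst() and
 * rte_event_enqueue_forward_burst() variants below give the PMD a hint that
 * can avoid per-event checks. A real application would bound the retry loop
 * and inspect rte_errno on failure.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_event_enqueue_burst(dev_id, port_id,
 *				&ev[sent], n - sent);
 */
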
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_forward_burst);
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

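/*
 * Illustrative sketch (not part of the original header): converting a 100 us
 * wait into the device-specific tick value expected by
 * rte_event_dequeue_burst(). The conversion may be unsupported by a given
 * PMD, in which case the sketch falls back to a non-blocking dequeue.
 *
 *	uint64_t timeout_ticks = 0;
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks) < 0)
 *		timeout_ticks = 0;
 */
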
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}

	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	/*
	 * Allow zero cost non burst mode routine invocation if application
	 * requests nb_events as const one
	 */
	if (nb_events == 1)
		return (*dev->dequeue)(
			dev->data->ports[port_id], ev, timeout_ticks);
	else
		return (*dev->dequeue_burst)(
			dev->data->ports[port_id], ev, nb_events,
			timeout_ticks);
}

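/*
 * Illustrative worker-loop sketch (not part of the original header): dequeue
 * a burst, process each event, retarget it at the next pipeline stage and
 * forward it back to the device. 'done', 'process()', 'next_queue' and
 * 'timeout_ticks' are hypothetical application symbols; enqueue retries and
 * error handling are omitted for brevity.
 *
 *	struct rte_event ev[16];
 *
 *	while (!done) {
 *		uint16_t i, nb;
 *
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *				RTE_DIM(ev), timeout_ticks);
 *		for (i = 0; i < nb; i++) {
 *			process(ev[i].mbuf);
 *			ev[i].queue_id = next_queue;
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *		}
 *		if (nb != 0)
 *			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 */
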
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

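/*
 * Illustrative sketch (not part of the original header): linking two queues
 * to a worker port. The queue numbers are arbitrary, and the priority hints
 * may be ignored by devices that do not support per-link service priority.
 * rte_event_port_link() returns the number of links actually established.
 *
 *	const uint8_t queues[] = { 0, 1 };
 *	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *				  RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	if (rte_event_port_link(dev_id, port_id, queues, prios,
 *			RTE_DIM(queues)) != RTE_DIM(queues))
 *		rte_exit(EXIT_FAILURE, "failed to link queues to port\n");
 */
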
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/* Selects the component of the eventdev the xstats refer to */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

/* A name-key lookup element for extended statistics */
struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const unsigned int ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id);

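/*
 * Illustrative sketch (not part of the original header): fetching one
 * extended statistic by name. The statistic name "dev_rx" is hypothetical;
 * valid names are driver-specific and can be discovered with
 * rte_event_dev_xstats_names_get(). The not-found check against
 * (unsigned int)-1 follows the documented sentinel for the returned id.
 *
 *	unsigned int id;
 *	uint64_t value = rte_event_dev_xstats_by_name_get(dev_id, "dev_rx", &id);
 *
 *	if (id != (unsigned int)-1)
 *		printf("dev_rx = %" PRIu64 "\n", value);
 */
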
int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

int rte_event_dev_selftest(uint8_t dev_id);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */