DPDK 17.05.2
rte_eventdev.h
/*
 *   BSD LICENSE
 *
 *   Copyright 2016 Cavium.
 *   Copyright 2016 Intel Corporation.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_errno.h>

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

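/*
 * A minimal device-discovery sketch. The vdev name "event_sw0" is only an
 * assumption (it matches a software eventdev created with --vdev event_sw0);
 * any probed device name may be used instead.
 *
 *    uint8_t nb_devs = rte_event_dev_count();
 *    int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *    if (dev_id < 0 && nb_devs > 0)
 *            dev_id = 0;    // fall back to the first available device
 */
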
int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
        const char *driver_name;
        struct rte_device *dev;
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        uint32_t dequeue_timeout_ns;
        uint8_t max_event_queues;
        uint32_t max_event_queue_flows;
        uint8_t max_event_ports;
        int32_t max_num_events;
        uint32_t event_dev_cap;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

struct rte_event_dev_config {
        uint32_t dequeue_timeout_ns;
        int32_t nb_events_limit;
        uint8_t nb_event_queues;
        uint8_t nb_event_ports;
        uint32_t nb_event_queue_flows;
        uint32_t event_dev_cfg;
};

int
rte_event_dev_configure(uint8_t dev_id,
                const struct rte_event_dev_config *dev_conf);

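/*
 * A minimal configuration sketch, assuming a single device (dev_id 0) with
 * one queue and one port. Limits are taken from rte_event_dev_info_get();
 * the remaining fields are left zeroed here for brevity.
 *
 *    uint8_t dev_id = 0;
 *    struct rte_event_dev_info info;
 *    struct rte_event_dev_config config = {0};
 *
 *    if (rte_event_dev_info_get(dev_id, &info) < 0)
 *            rte_panic("cannot get info for eventdev %u\n", dev_id);
 *
 *    config.nb_event_queues = 1;
 *    config.nb_event_ports = 1;
 *    config.nb_events_limit = info.max_num_events;
 *    config.nb_event_queue_flows = info.max_event_queue_flows;
 *    config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *    if (rte_event_dev_configure(dev_id, &config) < 0)
 *            rte_panic("cannot configure eventdev %u\n", dev_id);
 */
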
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)

#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)

#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)

#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)

struct rte_event_queue_conf {
        uint32_t nb_atomic_flows;
        uint32_t nb_atomic_order_sequences;
        uint32_t event_queue_cfg;
        uint8_t priority;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                const struct rte_event_queue_conf *queue_conf);

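/*
 * A queue-setup sketch, continuing from the configuration sketch above:
 * start from the driver defaults and request an atomic-only queue.
 * queue_id 0 is an assumption.
 *
 *    uint8_t queue_id = 0;
 *    struct rte_event_queue_conf queue_conf;
 *
 *    rte_event_queue_default_conf_get(dev_id, queue_id, &queue_conf);
 *    queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
 *    queue_conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *
 *    if (rte_event_queue_setup(dev_id, queue_id, &queue_conf) < 0)
 *            rte_panic("cannot setup event queue %u\n", queue_id);
 */
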
uint8_t
rte_event_queue_count(uint8_t dev_id);

uint8_t
rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);

/* Event port specific APIs */

struct rte_event_port_conf {
        int32_t new_event_threshold;
        uint16_t dequeue_depth;
        uint16_t enqueue_depth;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                const struct rte_event_port_conf *port_conf);

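/*
 * A port-setup sketch: each worker lcore typically owns one event port.
 * port_id 0 and the use of the driver defaults are assumptions.
 *
 *    uint8_t port_id = 0;
 *    struct rte_event_port_conf port_conf;
 *
 *    rte_event_port_default_conf_get(dev_id, port_id, &port_conf);
 *
 *    if (rte_event_port_setup(dev_id, port_id, &port_conf) < 0)
 *            rte_panic("cannot setup event port %u\n", port_id);
 */
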
uint8_t
rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);

uint8_t
rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);

uint8_t
rte_event_port_count(uint8_t dev_id);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

int
rte_event_dev_close(uint8_t dev_id);

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMERDEV 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
        /* WORD0: event metadata */
        union {
                uint64_t event;
                /* event attributes */
                struct {
                        uint32_t flow_id:20;
                        uint32_t sub_event_type:8;
                        uint32_t event_type:4;
                        uint8_t op:2;
                        uint8_t rsvd:4;
                        uint8_t sched_type:2;
                        uint8_t queue_id;
                        uint8_t priority;
                        uint8_t impl_opaque;
                };
        };
        /* WORD1: event payload */
        union {
                uint64_t u64;
                void *event_ptr;
                struct rte_mbuf *mbuf;
        };
};

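/*
 * A sketch of filling in an rte_event before enqueue. Clearing the whole
 * 64-bit event word first also zeroes rsvd and impl_opaque; the mbuf m and
 * queue_id are assumed from the sketches above.
 *
 *    struct rte_event ev;
 *
 *    ev.event = 0;
 *    ev.flow_id = 0;
 *    ev.op = RTE_EVENT_OP_NEW;
 *    ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *    ev.queue_id = queue_id;
 *    ev.event_type = RTE_EVENT_TYPE_CPU;
 *    ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *    ev.mbuf = m;
 */
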
struct rte_eventdev_driver;
struct rte_eventdev_ops;
struct rte_eventdev;

typedef void (*event_schedule_t)(struct rte_eventdev *dev);

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);

typedef uint16_t (*event_enqueue_burst_t)(void *port,
                const struct rte_event ev[], uint16_t nb_events);

typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
                uint64_t timeout_ticks);

typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
                uint16_t nb_events, uint64_t timeout_ticks);

#define RTE_EVENTDEV_NAME_MAX_LEN (64)

/* @internal Data associated with each event device. */
struct rte_eventdev_data {
        int socket_id;
        uint8_t dev_id;
        uint8_t nb_queues;
        uint8_t nb_ports;
        void **ports;
        uint8_t *ports_dequeue_depth;
        uint8_t *ports_enqueue_depth;
        uint8_t *queues_prio;
        uint16_t *links_map;
        void *dev_private;
        uint32_t event_dev_cap;
        struct rte_event_dev_config dev_conf;

        RTE_STD_C11
        uint8_t dev_started : 1;

        char name[RTE_EVENTDEV_NAME_MAX_LEN];
} __rte_cache_aligned;

/* @internal The generic data structure associated with each event device. */
struct rte_eventdev {
        event_schedule_t schedule;
        event_enqueue_t enqueue;
        event_enqueue_burst_t enqueue_burst;
        event_dequeue_t dequeue;
        event_dequeue_burst_t dequeue_burst;

        struct rte_eventdev_data *data;
        const struct rte_eventdev_ops *dev_ops;
        struct rte_device *dev;
        const struct rte_eventdev_driver *driver;

        RTE_STD_C11
        uint8_t attached : 1;
} __rte_cache_aligned;

/* @internal The pool of event devices. */
extern struct rte_eventdev *rte_eventdevs;

static inline void
rte_event_schedule(uint8_t dev_id)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        if (*dev->schedule)
                (*dev->schedule)(dev);
}

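/*
 * A scheduler-core sketch for devices without
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED: one dedicated lcore drives the
 * scheduler in a loop (done is an assumed exit flag).
 *
 *    while (!done)
 *            rte_event_schedule(dev_id);
 */
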
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                const struct rte_event ev[], uint16_t nb_events)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
                rte_errno = -EINVAL;
                return 0;
        }

        if (port_id >= dev->data->nb_ports) {
                rte_errno = -EINVAL;
                return 0;
        }
#endif

        /*
         * Allow zero cost non burst mode routine invocation if application
         * requests nb_events as const one
         */
        if (nb_events == 1)
                return (*dev->enqueue)(
                        dev->data->ports[port_id], ev);
        else
                return (*dev->enqueue_burst)(
                        dev->data->ports[port_id], ev, nb_events);
}

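/*
 * An enqueue sketch: a return value below nb_events means the device applied
 * backpressure (or rejected an event), so the caller retries the remainder.
 * dev_id, port_id and ev are assumed from the sketches above.
 *
 *    uint16_t sent = 0;
 *
 *    while (sent == 0)
 *            sent = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */
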
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                uint64_t *timeout_ticks);

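/*
 * A sketch of converting a wall-clock timeout into device-specific ticks for
 * rte_event_dequeue_burst(); the 10 microsecond value is an arbitrary choice.
 *
 *    uint64_t timeout_ticks = 0;
 *
 *    if (rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &timeout_ticks) < 0)
 *            timeout_ticks = 0;    // fall back to a non-blocking dequeue
 */
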
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
                uint16_t nb_events, uint64_t timeout_ticks)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
                rte_errno = -EINVAL;
                return 0;
        }

        if (port_id >= dev->data->nb_ports) {
                rte_errno = -EINVAL;
                return 0;
        }
#endif

        /*
         * Allow zero cost non burst mode routine invocation if application
         * requests nb_events as const one
         */
        if (nb_events == 1)
                return (*dev->dequeue)(
                        dev->data->ports[port_id], ev, timeout_ticks);
        else
                return (*dev->dequeue_burst)(
                        dev->data->ports[port_id], ev, nb_events,
                        timeout_ticks);
}

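/*
 * A worker dequeue-loop sketch. The burst size of 16 is an assumption, done
 * is an assumed exit flag, and the per-event processing is application
 * specific.
 *
 *    struct rte_event events[16];
 *    uint16_t i, nb_rx;
 *
 *    while (!done) {
 *            nb_rx = rte_event_dequeue_burst(dev_id, port_id, events,
 *                            16, timeout_ticks);
 *            for (i = 0; i < nb_rx; i++) {
 *                    // process events[i] according to events[i].event_type
 *            }
 *    }
 */
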
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                const uint8_t queues[], const uint8_t priorities[],
                uint16_t nb_links);

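/*
 * A link-and-start sketch: passing NULL for queues[] and priorities[] links
 * the port to every configured queue at normal priority, and on success the
 * return value equals the number of configured queues. The device can then
 * be started.
 *
 *    int nb_linked = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
 *
 *    if (nb_linked != rte_event_queue_count(dev_id))
 *            rte_panic("cannot link all queues to port %u\n", port_id);
 *
 *    if (rte_event_dev_start(dev_id) < 0)
 *            rte_panic("cannot start eventdev %u\n", dev_id);
 */
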
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                uint8_t queues[], uint16_t nb_unlinks);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                uint8_t queues[], uint8_t priorities[]);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/* Selects the component of the event device to retrieve statistics from. */
enum rte_event_dev_xstats_mode {
        RTE_EVENT_DEV_XSTATS_DEVICE,
        RTE_EVENT_DEV_XSTATS_PORT,
        RTE_EVENT_DEV_XSTATS_QUEUE,
};

/* A name-key lookup element for extended statistics. */
struct rte_event_dev_xstats_name {
        char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id,
                struct rte_event_dev_xstats_name *xstats_names,
                unsigned int *ids,
                unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id,
                const unsigned int ids[],
                uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                unsigned int *id);

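/*
 * An xstats sketch, assuming a size of 0 can be used to query the number of
 * device-level counters before fetching their names and values. Error
 * handling and freeing are omitted for brevity.
 *
 *    int num = rte_event_dev_xstats_names_get(dev_id,
 *                    RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *
 *    if (num > 0) {
 *            struct rte_event_dev_xstats_name *names =
 *                    malloc(num * sizeof(*names));
 *            unsigned int *ids = malloc(num * sizeof(*ids));
 *            uint64_t *values = malloc(num * sizeof(*values));
 *
 *            rte_event_dev_xstats_names_get(dev_id,
 *                            RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, num);
 *            rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *                            0, ids, values, num);
 *    }
 */
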
int
rte_event_dev_xstats_reset(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                int16_t queue_port_id,
                const uint32_t ids[],
                uint32_t nb_ids);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */