DPDK 17.08.2
rte_eventdev.h
/*
 * BSD LICENSE
 *
 * Copyright 2016 Cavium, Inc.
 * Copyright 2016 Intel Corporation.
 * Copyright 2016 NXP.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium, Inc nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_
#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_errno.h>

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Event scheduling prioritization is based on the priority associated
 * with each event queue.
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated
 * with each individual event.
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event scheduling is distributed: it happens as part of the
 * application's dequeue call, so no dedicated core is needed to run
 * rte_event_schedule().
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< A single event queue can accept events of all schedule types
 * (atomic, ordered, parallel), selected per event.
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< The device natively supports burst enqueue/dequeue operations. */

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority expressed across the eventdev subsystem. */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority expressed across the eventdev subsystem. */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority expressed across the eventdev subsystem. */

/**
 * Get the total number of event devices that have been successfully
 * initialised.
 */
uint8_t
rte_event_dev_count(void);

/**
 * Get the device identifier for the named event device, or a negative
 * value on error.
 */
int
rte_event_dev_get_dev_id(const char *name);

/**
 * Return the NUMA socket to which the given event device is connected.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);

/** Event device information */
struct rte_event_dev_info {
        const char *driver_name;        /**< Event driver name */
        struct rte_device *dev;         /**< Device information */
        uint32_t min_dequeue_timeout_ns;
        /**< Minimum supported global dequeue timeout(ns) by this device */
        uint32_t max_dequeue_timeout_ns;
        /**< Maximum supported global dequeue timeout(ns) by this device */
        uint32_t dequeue_timeout_ns;
        /**< Configured global dequeue timeout(ns) for this device */
        uint8_t max_event_queues;
        /**< Maximum number of event queues supported by this device */
        uint32_t max_event_queue_flows;
        /**< Maximum number of flows within an event queue on this device */
        uint8_t max_event_ports;
        /**< Maximum number of event ports supported by this device */
        int32_t max_num_events;
        /**< Maximum number of events that can be stored in the device at a
         * time; a negative value means no limit.
         */
        uint32_t event_dev_cap;
        /**< Event device capabilities (RTE_EVENT_DEV_CAP_* flags) */
};

/**
 * Retrieve the contextual information of an event device.
 *
 * @return 0 on success, negative on error.
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

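/*
 * Usage sketch (illustrative only, not part of the original header):
 * enumerate the available event devices and print a few of their limits.
 * Assumes rte_eal_init() has already run and <stdio.h> is included.
 *
 *      uint8_t i, count = rte_event_dev_count();
 *      for (i = 0; i < count; i++) {
 *              struct rte_event_dev_info info;
 *              if (rte_event_dev_info_get(i, &info) != 0)
 *                      continue;
 *              printf("dev %u (%s): queues=%u ports=%u socket=%d\n",
 *                     i, info.driver_name, info.max_event_queues,
 *                     info.max_event_ports, rte_event_dev_socket_id(i));
 *      }
 */
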
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global dequeue timeout with the per-call timeout argument
 * of rte_event_dequeue_burst().
 */

/** Event device configuration structure */
struct rte_event_dev_config {
        uint32_t dequeue_timeout_ns;
        /**< rte_event_dequeue_burst() timeout(ns) for this device */
        int32_t nb_events_limit;
        /**< Limit on the number of events that may be inflight in the
         * device at a time; cannot exceed the reported max_num_events.
         */
        uint8_t nb_event_queues;
        /**< Number of event queues to configure on this device */
        uint8_t nb_event_ports;
        /**< Number of event ports to configure on this device */
        uint32_t nb_event_queue_flows;
        /**< Number of flows for any event queue on this device */
        uint32_t event_dev_cfg;
        /**< Event device config flags (RTE_EVENT_DEV_CFG_*) */
};

/**
 * Configure an event device. The application must fill *dev_conf* within
 * the limits returned by rte_event_dev_info_get().
 *
 * @return 0 on success, negative on error.
 */
int
rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf);

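/*
 * Usage sketch (illustrative only, not part of the original header): size
 * the device within the limits reported by rte_event_dev_info_get() and
 * apply the configuration. The member values are arbitrary example
 * choices, and the full structure has further members not shown in this
 * listing; dev_id is assumed to be a valid device identifier.
 *
 *      struct rte_event_dev_info info;
 *      struct rte_event_dev_config cfg = {0};
 *
 *      rte_event_dev_info_get(dev_id, &info);
 *      cfg.nb_event_queues = 2;
 *      cfg.nb_event_ports = 4;
 *      cfg.nb_events_limit = info.max_num_events;
 *      cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *      cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *      if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *              rte_exit(EXIT_FAILURE, "eventdev configure failed\n");
 */
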
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_TYPE_MASK      (3ULL << 0)
/**< Mask of the schedule-type bits below */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES      (0ULL << 0)
/**< Allow ATOMIC, ORDERED and PARALLEL schedule type enqueue */
#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY    (1ULL << 0)
/**< Allow only ATOMIC schedule type enqueue */
#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY   (2ULL << 0)
/**< Allow only ORDERED schedule type enqueue */
#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY  (3ULL << 0)
/**< Allow only PARALLEL schedule type enqueue */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK    (1ULL << 2)
/**< This event queue links only to a single event port */

/** Event queue configuration structure */
struct rte_event_queue_conf {
        uint32_t nb_atomic_flows;
        /**< Number of atomic flows; valid for ATOMIC queues */
        uint32_t nb_atomic_order_sequences;
        /**< Number of ordered sequences; valid for ORDERED queues */
        uint32_t event_queue_cfg;
        /**< Queue cfg flags (RTE_EVENT_QUEUE_CFG_*) */
        uint8_t priority;
        /**< Priority of this event queue relative to other event queues;
         * valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS
         * capability.
         */
};

/**
 * Retrieve the default configuration that the driver suggests for an
 * event queue.
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                                 struct rte_event_queue_conf *queue_conf);

/**
 * Allocate and set up an event queue. If *queue_conf* is NULL, the
 * default configuration is used.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                      const struct rte_event_queue_conf *queue_conf);

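/*
 * Usage sketch (illustrative only, not part of the original header): set
 * up one atomic queue, starting from the driver's default configuration;
 * dev_id is assumed valid and configured.
 *
 *      struct rte_event_queue_conf qconf;
 *
 *      rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *      qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
 *      qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *      if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *              rte_exit(EXIT_FAILURE, "queue 0 setup failed\n");
 */
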
/**
 * Get the number of event queues on a specific event device.
 */
uint8_t
rte_event_queue_count(uint8_t dev_id);

/**
 * Get the priority of an event queue on a specific event device.
 */
uint8_t
rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);

/* Event port specific APIs */

/** Event port configuration structure */
struct rte_event_port_conf {
        int32_t new_event_threshold;
        /**< Backpressure threshold: once this many events are inflight in
         * the device, further RTE_EVENT_OP_NEW enqueues through this port
         * may be rejected.
         */
        uint16_t dequeue_depth;
        /**< Configure the number of bulk dequeues for this event port */
        uint16_t enqueue_depth;
        /**< Configure the number of bulk enqueues for this event port */
};

/**
 * Retrieve the default configuration that the driver suggests for an
 * event port.
 */
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                                struct rte_event_port_conf *port_conf);

/**
 * Allocate and set up an event port. If *port_conf* is NULL, the default
 * configuration is used.
 */
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf);

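/*
 * Usage sketch (illustrative only, not part of the original header): set
 * up an event port starting from the driver defaults; the threshold value
 * is an arbitrary example choice.
 *
 *      struct rte_event_port_conf pconf;
 *
 *      rte_event_port_default_conf_get(dev_id, 0, &pconf);
 *      pconf.new_event_threshold = 1024;
 *      if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
 *              rte_exit(EXIT_FAILURE, "port 0 setup failed\n");
 */
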
/**
 * Get the dequeue depth configured for an event port.
 */
uint8_t
rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);

/**
 * Get the enqueue depth configured for an event port.
 */
uint8_t
rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);

/**
 * Get the number of event ports on a specific event device.
 */
uint8_t
rte_event_port_count(uint8_t dev_id);

/**
 * Start an event device: the last step before events can be enqueued and
 * dequeued.
 */
int
rte_event_dev_start(uint8_t dev_id);

/**
 * Stop an event device.
 */
void
rte_event_dev_stop(uint8_t dev_id);

/**
 * Close (release all resources of) a stopped event device.
 */
int
rte_event_dev_close(uint8_t dev_id);

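/*
 * Usage sketch (illustrative only, not part of the original header): the
 * typical device lifecycle once queues and ports are set up and linked.
 *
 *      if (rte_event_dev_start(dev_id) < 0)
 *              rte_exit(EXIT_FAILURE, "failed to start eventdev\n");
 *      ... enqueue/dequeue on the ports from worker lcores ...
 *      rte_event_dev_stop(dev_id);
 *      rte_event_dev_close(dev_id);
 */
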
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED  0
/**< Ordered scheduling: events of a flow may be processed in parallel on
 * multiple ports, but the device restores the original enqueue order when
 * the events are forwarded to the next queue.
 */
#define RTE_SCHED_TYPE_ATOMIC   1
/**< Atomic scheduling: events of a flow are scheduled to only one port at
 * a time, guaranteeing flow ordering and atomic access to flow state.
 */
#define RTE_SCHED_TYPE_PARALLEL 2
/**< Parallel scheduling: events are scheduled with no ordering or
 * atomicity guarantees.
 */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV    0x0
/**< The event was generated by the ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event was generated by the cryptodev subsystem */
#define RTE_EVENT_TYPE_TIMERDEV  0x2
/**< The event was generated by the timerdev subsystem */
#define RTE_EVENT_TYPE_CPU       0x3
/**< The event was generated by the CPU, i.e. by the application itself */
#define RTE_EVENT_TYPE_MAX       0x10
/**< Maximum number of event types */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW     0
/**< Inject a new event into the device */
#define RTE_EVENT_OP_FORWARD 1
/**< Re-enqueue a previously dequeued event, releasing its current flow
 * context in the process
 */
#define RTE_EVENT_OP_RELEASE 2
/**< Release the flow context of a dequeued event without re-enqueueing it */

/** The generic rte_event structure holding the event attributes for the
 * enqueue and dequeue operations.
 */
struct rte_event {
        /* WORD0 */
        union {
                uint64_t event;
                /** Event attributes for dequeue or enqueue operation */
                struct {
                        uint32_t flow_id:20;
                        /**< Target flow identifier for the event */
                        uint32_t sub_event_type:8;
                        /**< Sub-event type, interpreted based on the
                         * event source
                         */
                        uint32_t event_type:4;
                        /**< Event source classification (RTE_EVENT_TYPE_*) */
                        uint8_t op:2;
                        /**< Enqueue operation (RTE_EVENT_OP_*) */
                        uint8_t rsvd:4;
                        /**< Reserved for future use */
                        uint8_t sched_type:2;
                        /**< Scheduler synchronization type
                         * (RTE_SCHED_TYPE_*)
                         */
                        uint8_t queue_id;
                        /**< Target event queue identifier */
                        uint8_t priority;
                        /**< Event priority relative to other events in the
                         * queue; valid with the RTE_EVENT_DEV_CAP_EVENT_QOS
                         * capability.
                         */
                        uint8_t impl_opaque;
                        /**< Implementation-specific opaque value, preserved
                         * between dequeue and the matching forward/release.
                         */
                };
        };
        /* WORD1 */
        union {
                uint64_t u64;
                /**< Opaque 64-bit value */
                void *event_ptr;
                /**< Opaque event pointer */
                struct rte_mbuf *mbuf;
                /**< mbuf pointer, if the event carries an mbuf */
        };
};

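/*
 * Usage sketch (illustrative only, not part of the original header):
 * populate a new event carrying an mbuf, where m is assumed to be an
 * existing struct rte_mbuf pointer. Zero-initialising first keeps the
 * reserved bits and impl_opaque clear.
 *
 *      struct rte_event ev = {0};
 *
 *      ev.op = RTE_EVENT_OP_NEW;
 *      ev.queue_id = 0;
 *      ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *      ev.event_type = RTE_EVENT_TYPE_CPU;
 *      ev.flow_id = 0xbeef;
 *      ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *      ev.mbuf = m;
 */
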
struct rte_eventdev_driver;
struct rte_eventdev_ops;
struct rte_eventdev;

typedef void (*event_schedule_t)(struct rte_eventdev *dev);
/**< @internal Schedule one or more events in the event dev. */

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue an event on a port of a device */

typedef uint16_t (*event_enqueue_burst_t)(void *port,
                const struct rte_event ev[], uint16_t nb_events);
/**< @internal Enqueue a burst of events on a port of a device */

typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
                uint64_t timeout_ticks);
/**< @internal Dequeue an event from a port of a device */

typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
                uint16_t nb_events, uint64_t timeout_ticks);
/**< @internal Dequeue a burst of events from a port of a device */

#define RTE_EVENTDEV_NAME_MAX_LEN (64)
/**< @internal Max length of an event device name */

/** @internal The data part, with no function pointers, associated with
 * each device; safe to place in shared memory for multi-process use.
 */
struct rte_eventdev_data {
        int socket_id;
        /**< Socket ID where memory is allocated */
        uint8_t dev_id;
        /**< Device ID for this instance */
        uint8_t nb_queues;
        /**< Number of event queues */
        uint8_t nb_ports;
        /**< Number of event ports */
        void **ports;
        /**< Array of pointers to the ports */
        uint8_t *ports_dequeue_depth;
        /**< Array of port dequeue depths */
        uint8_t *ports_enqueue_depth;
        /**< Array of port enqueue depths */
        uint8_t *queues_prio;
        /**< Array of queue priorities */
        uint16_t *links_map;
        /**< Memory to store the queue-to-port link map */
        void *dev_private;
        /**< PMD-specific private data */
        uint32_t event_dev_cap;
        /**< Event device capabilities (RTE_EVENT_DEV_CAP_*) */
        struct rte_event_dev_config dev_conf;
        /**< Configuration applied to the device */

        RTE_STD_C11
        uint8_t dev_started : 1;
        /**< Device state: started (1) / stopped (0) */

        char name[RTE_EVENTDEV_NAME_MAX_LEN];
        /**< Unique identifier name */
} __rte_cache_aligned;

/** @internal The data structure associated with each event device. */
struct rte_eventdev {
        event_schedule_t schedule;
        /**< Pointer to PMD schedule function */
        event_enqueue_t enqueue;
        /**< Pointer to PMD enqueue function */
        event_enqueue_burst_t enqueue_burst;
        /**< Pointer to PMD enqueue burst function */
        event_enqueue_burst_t enqueue_new_burst;
        /**< Pointer to PMD enqueue burst function (op new variant) */
        event_enqueue_burst_t enqueue_forward_burst;
        /**< Pointer to PMD enqueue burst function (op forward variant) */
        event_dequeue_t dequeue;
        /**< Pointer to PMD dequeue function */
        event_dequeue_burst_t dequeue_burst;
        /**< Pointer to PMD dequeue burst function */

        struct rte_eventdev_data *data;
        /**< Pointer to device data */
        const struct rte_eventdev_ops *dev_ops;
        /**< Functions exported by the PMD */
        struct rte_device *dev;
        /**< Device info supplied by probing */

        RTE_STD_C11
        uint8_t attached : 1;
        /**< Flag indicating the device is attached */
} __rte_cache_aligned;

extern struct rte_eventdev *rte_eventdevs;
/**< @internal The pool of rte_eventdev structures. */

/**
 * Trigger the event device's internal scheduling. Needed only on devices
 * without the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability; a no-op when
 * the PMD provides no schedule function.
 */
static inline void
rte_event_schedule(uint8_t dev_id)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        if (*dev->schedule)
                (*dev->schedule)(dev);
}

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                        const struct rte_event ev[], uint16_t nb_events,
                        const event_enqueue_burst_t fn)
{
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
                rte_errno = EINVAL; /* rte_errno takes positive errno values */
                return 0;
        }

        if (port_id >= dev->data->nb_ports) {
                rte_errno = EINVAL;
                return 0;
        }
#endif
        /*
         * Collapse to the zero-overhead single-event enqueue when the
         * application passes a compile-time constant nb_events of 1.
         */
        if (nb_events == 1)
                return (*dev->enqueue)(dev->data->ports[port_id], ev);
        else
                return fn(dev->data->ports[port_id], ev, nb_events);
}

/**
 * Enqueue a burst of events, using the op type carried in each event.
 * Returns the number of events actually enqueued, which may be less than
 * *nb_events* if the device has insufficient space.
 */
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                        const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        dev->enqueue_burst);
}

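/*
 * Usage sketch (illustrative only, not part of the original header): the
 * call may enqueue fewer than nb_events when the device is short of space
 * or hits the port's new event threshold, so retry with the remainder;
 * events[], nb_events, dev_id and port_id are assumed to exist. A real
 * application would bound the retries or drop the leftover events.
 *
 *      uint16_t sent = 0;
 *
 *      while (sent < nb_events)
 *              sent += rte_event_enqueue_burst(dev_id, port_id,
 *                                              &events[sent],
 *                                              nb_events - sent);
 */
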
/**
 * Enqueue a burst of events in which every event has op RTE_EVENT_OP_NEW;
 * this hint lets the PMD take a specialised fast path.
 */
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
                        const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        dev->enqueue_new_burst);
}

/**
 * Enqueue a burst of events in which every event has op
 * RTE_EVENT_OP_FORWARD; this hint lets the PMD take a specialised fast
 * path.
 */
static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
                        const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        dev->enqueue_forward_burst);
}

/**
 * Convert a dequeue timeout from nanoseconds to device-specific ticks for
 * use with rte_event_dequeue_burst().
 */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                uint64_t *timeout_ticks);

/**
 * Dequeue a burst of up to *nb_events* events, waiting at most
 * *timeout_ticks* (as produced by rte_event_dequeue_timeout_ticks()) when
 * no events are available.
 */
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
                        uint16_t nb_events, uint64_t timeout_ticks)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
                rte_errno = EINVAL; /* rte_errno takes positive errno values */
                return 0;
        }

        if (port_id >= dev->data->nb_ports) {
                rte_errno = EINVAL;
                return 0;
        }
#endif

        /*
         * Collapse to the zero-overhead single-event dequeue when the
         * application passes a compile-time constant nb_events of 1.
         */
        if (nb_events == 1)
                return (*dev->dequeue)(
                        dev->data->ports[port_id], ev, timeout_ticks);
        else
                return (*dev->dequeue_burst)(
                        dev->data->ports[port_id], ev, nb_events,
                        timeout_ticks);
}

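/*
 * Usage sketch (illustrative only, not part of the original header): a
 * worker loop that converts a 100us timeout to device ticks once, then
 * dequeues and processes bursts. done and process_event() are hypothetical
 * application-side names.
 *
 *      struct rte_event evs[32];
 *      uint64_t timeout_ticks = 0;
 *      uint16_t i, n;
 *
 *      rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &timeout_ticks);
 *      while (!done) {
 *              n = rte_event_dequeue_burst(dev_id, port_id, evs, 32,
 *                                          timeout_ticks);
 *              for (i = 0; i < n; i++)
 *                      process_event(&evs[i]);
 *      }
 */
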
/**
 * Link event queues to an event port so that the port can dequeue events
 * from them; *priorities* sets the per-link service priority. Returns the
 * number of links actually established.
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links);

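/*
 * Usage sketch (illustrative only, not part of the original header): link
 * port 0 to queues 0 and 1 at normal service priority. Passing NULL for
 * queues instead links the port to every configured queue.
 *
 *      const uint8_t queues[] = {0, 1};
 *      const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
 *                               RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 *      if (rte_event_port_link(dev_id, 0, queues, prios, 2) != 2)
 *              rte_exit(EXIT_FAILURE, "port link failed\n");
 */
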
/**
 * Unlink event queues from an event port; a NULL *queues* unlinks them
 * all. Returns the number of links actually removed.
 */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks);

/**
 * Retrieve the current links of an event port; fills *queues* and
 * *priorities* and returns the number of links.
 */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                         uint8_t queues[], uint8_t priorities[]);

/**
 * Dump internal information about the event device to a file stream.
 */
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
/**< Maximum name length of an extended statistics counter */

/**
 * Selects the component of the eventdev that extended statistics are
 * retrieved from.
 */
enum rte_event_dev_xstats_mode {
        RTE_EVENT_DEV_XSTATS_DEVICE,
        RTE_EVENT_DEV_XSTATS_PORT,
        RTE_EVENT_DEV_XSTATS_QUEUE,
};

/**
 * A name-key lookup element for extended statistics, mapping statistic
 * names to the ID numbers used in the xstats get/reset calls.
 */
struct rte_event_dev_xstats_name {
        char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

/**
 * Retrieve the names (and corresponding IDs) of the extended statistics
 * exposed by the device, port or queue selected by *mode*. Returns the
 * number of available statistics, which may exceed *size*.
 */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
                               enum rte_event_dev_xstats_mode mode,
                               uint8_t queue_port_id,
                               struct rte_event_dev_xstats_name *xstats_names,
                               unsigned int *ids,
                               unsigned int size);

/**
 * Retrieve the values of the extended statistics identified by *ids*.
 * Returns the number of values written to *values*.
 */
int
rte_event_dev_xstats_get(uint8_t dev_id,
                         enum rte_event_dev_xstats_mode mode,
                         uint8_t queue_port_id,
                         const unsigned int ids[],
                         uint64_t values[], unsigned int n);

/**
 * Retrieve the value of a single extended statistic by name; if *id* is
 * non-NULL, the statistic's ID is stored there for faster future lookups.
 */
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                                 unsigned int *id);

/**
 * Reset the values of a subset (or, with *ids* NULL, all) of the extended
 * statistics; a negative *queue_port_id* selects all ports or queues.
 */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
                           enum rte_event_dev_xstats_mode mode,
                           int16_t queue_port_id,
                           const uint32_t ids[],
                           uint32_t nb_ids);

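/*
 * Usage sketch (illustrative only, not part of the original header): dump
 * every device-level extended statistic by name. The array size of 256 is
 * an arbitrary choice; assumes <stdio.h> and <inttypes.h> are included.
 *
 *      struct rte_event_dev_xstats_name names[256];
 *      unsigned int ids[256];
 *      uint64_t values[256];
 *      int i, n;
 *
 *      n = rte_event_dev_xstats_names_get(dev_id,
 *                      RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 256);
 *      if (n > 0 && n <= 256 &&
 *          rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *                                   0, ids, values, n) == n)
 *              for (i = 0; i < n; i++)
 *                      printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 */
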
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */