#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
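/*
 * Usage sketch (not part of this header): query the capability bitmap with
 * rte_event_dev_info_get() before relying on any of the flags above.
 */
static inline int
example_has_queue_qos(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info) < 0)
		return 0;
	return !!(info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS);
}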
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
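/*
 * Sketch: per-event priority is honoured only on devices reporting
 * RTE_EVENT_DEV_CAP_EVENT_QOS; a lower numerical value is a higher priority.
 */
static inline void
example_mark_urgent(struct rte_event *ev)
{
	ev->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;	/* 0 == most urgent */
}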
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
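/*
 * Sketch of enabling per-dequeue timeouts at configure time; the field
 * values below are illustrative assumptions, not recommendations.
 */
static int
example_configure(uint8_t dev_id)
{
	struct rte_event_dev_config cfg = {
		.dequeue_timeout_ns = 0, /* ignored: timeout given per dequeue */
		.nb_events_limit = 4096,
		.nb_event_queues = 2,
		.nb_event_ports = 4,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 16,
		.nb_event_port_enqueue_depth = 16,
		.event_dev_cfg = RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT,
	};

	return rte_event_dev_configure(dev_id, &cfg);
}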
#define RTE_EVENT_QUEUE_CFG_TYPE_MASK      (3ULL << 0)
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES      (0ULL << 0)
#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY    (1ULL << 0)
#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY   (2ULL << 0)
#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY  (3ULL << 0)
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK    (1ULL << 2)
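/*
 * Sketch of an atomic-only queue setup using the flags above; flow and
 * sequence counts are illustrative assumptions.
 */
static int
example_setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf conf = {
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};

	return rte_event_queue_setup(dev_id, queue_id, &conf);
}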
#define RTE_SCHED_TYPE_ORDERED   0
#define RTE_SCHED_TYPE_ATOMIC    1
#define RTE_SCHED_TYPE_PARALLEL  2
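/*
 * Sketch: on an RTE_EVENT_QUEUE_CFG_ALL_TYPES queue (where supported) the
 * producer picks the synchronization model per event.
 */
static inline void
example_tag_ordered(struct rte_event *ev, uint32_t flow)
{
	ev->flow_id = flow;			 /* ordering is tracked per flow */
	ev->sched_type = RTE_SCHED_TYPE_ORDERED; /* order restored on next enqueue */
}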
#define RTE_EVENT_TYPE_ETHDEV    0x0
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
#define RTE_EVENT_TYPE_TIMERDEV  0x2
#define RTE_EVENT_TYPE_CPU       0x3
#define RTE_EVENT_TYPE_MAX       0x10
#define RTE_EVENT_OP_NEW      0
#define RTE_EVENT_OP_FORWARD  1
#define RTE_EVENT_OP_RELEASE  2
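/*
 * Sketch of the op life cycle: producers inject with OP_NEW, workers pass
 * events downstream with OP_FORWARD, and OP_RELEASE gives back the
 * scheduling context of the last dequeue without emitting a new event.
 */
static inline void
example_next_stage(struct rte_event *ev, uint8_t next_queue)
{
	ev->queue_id = next_queue;	/* target the next pipeline stage */
	ev->op = RTE_EVENT_OP_FORWARD;	/* reuse the dequeued context */
}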
typedef void (*event_schedule_t)(struct rte_eventdev *dev);
/**< @internal Run one iteration of the device's schedule function */

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue a single event to the given port */

typedef uint16_t (*event_enqueue_burst_t)(void *port,
		const struct rte_event ev[], uint16_t nb_events);
/**< @internal Enqueue a burst of events to the given port */

typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);
/**< @internal Dequeue a single event from the given port */

typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);
/**< @internal Dequeue a burst of events from the given port */
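/*
 * A hypothetical PMD-side function matching event_enqueue_t (sketch only;
 * struct example_port and its fixed-size buffer are illustrative):
 */
struct example_port {
	struct rte_event buf[32];	/* toy staging buffer */
	uint16_t count;
};

static uint16_t
example_enqueue(void *port, const struct rte_event *ev)
{
	struct example_port *p = port;

	if (p->count >= 32)
		return 0;		/* back-pressure: event not accepted */
	p->buf[p->count++] = *ev;
	return 1;			/* number of events enqueued */
}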
#define RTE_EVENTDEV_NAME_MAX_LEN (64)
struct rte_eventdev_data {
	uint8_t nb_ports;		/**< Number of event ports */
	void **ports;			/**< Array of pointers to ports */
	uint8_t *ports_dequeue_depth;	/**< Per-port dequeue depth */
	uint8_t *ports_enqueue_depth;	/**< Per-port enqueue depth */
	uint8_t *queues_prio;		/**< Per-queue priority */
	uint16_t *links_map;		/**< Port-to-queue link mapping */
	uint32_t event_dev_cap;		/**< Device capability flags */
	uint8_t dev_started : 1;	/**< Device started state */
	char name[RTE_EVENTDEV_NAME_MAX_LEN];	/**< Unique identifier name */
};
struct rte_eventdev {
	event_schedule_t schedule;	/**< Pointer to PMD schedule function */
	event_enqueue_t enqueue;	/**< Pointer to PMD enqueue function */
	event_enqueue_burst_t enqueue_burst;
	/**< Pointer to PMD enqueue burst function */
	event_dequeue_t dequeue;	/**< Pointer to PMD dequeue function */
	event_dequeue_burst_t dequeue_burst;
	/**< Pointer to PMD dequeue burst function */
	struct rte_eventdev_data *data;	/**< Pointer to shared device data */
	uint8_t attached : 1;		/**< Flag: the device is attached */
};
extern struct rte_eventdev *rte_eventdevs;
/**< @internal The pool of rte_eventdev structures */
static inline void
rte_event_schedule(uint8_t dev_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (*dev->schedule)
		(*dev->schedule)(dev);
}
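/*
 * Usage sketch: on devices without RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED,
 * one dedicated lcore has to drive the scheduler (loop is illustrative).
 */
static int
example_schedule_loop(void *arg)
{
	uint8_t dev_id = *(uint8_t *)arg;

	for (;;)
		rte_event_schedule(dev_id);
	return 0;
}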
static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}
	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	/* Take the single-event path when the burst degenerates to one event */
	if (nb_events == 1)
		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	else
		return (*dev->enqueue_burst)(dev->data->ports[port_id], ev,
						nb_events);
}
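/*
 * Producer-side usage sketch (queue id, sched type and mbuf origin are
 * illustrative assumptions):
 */
static void
example_inject(uint8_t dev_id, uint8_t port_id, struct rte_mbuf *m)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.queue_id = 0,
		.sched_type = RTE_SCHED_TYPE_ATOMIC,
		.event_type = RTE_EVENT_TYPE_CPU,
		.mbuf = m,
	};

	/* Retry until the single event is accepted. */
	while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
		;
}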
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);
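/*
 * Usage sketch: convert a nanosecond budget to device ticks once at init
 * and reuse the result for every dequeue (100 us is illustrative).
 */
static uint64_t
example_dequeue_timeout(uint8_t dev_id)
{
	uint64_t ticks = 0;

	/* A negative return means the device cannot do the conversion. */
	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
		ticks = 0;	/* fall back to non-blocking dequeue */
	return ticks;
}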
static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}
	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	if (nb_events == 1)
		return (*dev->dequeue)(dev->data->ports[port_id], ev,
					timeout_ticks);
	else
		return (*dev->dequeue_burst)(dev->data->ports[port_id], ev,
					nb_events, timeout_ticks);
}
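/*
 * Canonical worker-loop sketch built on this wrapper (the stage handling
 * shown is illustrative):
 */
static int
example_worker(uint8_t dev_id, uint8_t port_id, uint64_t timeout_ticks)
{
	struct rte_event ev[16];
	uint16_t i, n;

	for (;;) {
		n = rte_event_dequeue_burst(dev_id, port_id, ev, 16,
					timeout_ticks);
		for (i = 0; i < n; i++) {
			/* ... process ev[i] ..., then send it downstream */
			ev[i].queue_id++;
			ev[i].op = RTE_EVENT_OP_FORWARD;
		}
		if (n)
			rte_event_enqueue_burst(dev_id, port_id, ev, n);
	}
	return 0;
}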
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[]);
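/*
 * Usage sketch: link a worker port to two queues with explicit service
 * priorities (queue ids and priorities are illustrative).
 */
static int
example_link_port(uint8_t dev_id, uint8_t port_id)
{
	const uint8_t queues[] = { 0, 1 };
	const uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
				  RTE_EVENT_DEV_PRIORITY_NORMAL };

	/* Returns the number of queues actually linked. */
	return rte_event_port_link(dev_id, port_id, queues, prios, 2);
}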
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,	/**< Device-scope statistics */
	RTE_EVENT_DEV_XSTATS_PORT,	/**< Per-port statistics */
	RTE_EVENT_DEV_XSTATS_QUEUE,	/**< Per-queue statistics */
};
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n);

int
rte_event_dev_xstats_reset(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id, const uint32_t ids[], uint32_t nb_ids);
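/*
 * Usage sketch: retrieve device-scope extended stats by id (array sizes
 * and the summing are illustrative).
 */
static uint64_t
example_sum_device_xstats(uint8_t dev_id)
{
	struct rte_event_dev_xstats_name names[64];
	unsigned int ids[64];
	uint64_t values[64], sum = 0;
	int i, n;

	n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, names, ids, 64);
	if (n <= 0 || n > 64)
		return 0;
	if (rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
			0, ids, values, (unsigned int)n) != n)
		return 0;
	for (i = 0; i < n; i++)
		sum += values[i];
	return sum;
}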