#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS           (1ULL << 0)
/**< Scheduling prioritization is based on the priority of each event queue. */
#define RTE_EVENT_DEV_CAP_EVENT_QOS           (1ULL << 1)
/**< Scheduling prioritization is based on the priority of each event. */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED   (1ULL << 2)
/**< Event scheduling happens in hardware and/or rte_event_dequeue_burst();
 * no dedicated rte_event_schedule() caller is required.
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES     (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue. */
#define RTE_EVENT_DEV_CAP_BURST_MODE          (1ULL << 4)
/**< Event device is capable of burst mode enqueue and dequeue operation. */

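/*
 * Illustrative usage (not part of this header): an application can probe
 * these capability flags via the event_dev_cap field reported by
 * rte_event_dev_info_get().
 *
 * @code{.c}
 * struct rte_event_dev_info info;
 *
 * rte_event_dev_info_get(dev_id, &info);
 * if (info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
 *	// No dedicated scheduling core is required; scheduling happens
 *	// in hardware or inside rte_event_dequeue_burst().
 * }
 * @endcode
 */
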
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST   0
/**< Highest priority expressed across the eventdev subsystem. */
#define RTE_EVENT_DEV_PRIORITY_NORMAL    128
/**< Normal priority expressed across the eventdev subsystem. */
#define RTE_EVENT_DEV_PRIORITY_LOWEST    255
/**< Lowest priority expressed across the eventdev subsystem. */

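/*
 * Illustrative usage: these constants normalize priorities across PMDs
 * with different native priority ranges. An individual event can be
 * marked highest priority; the value takes effect only if the device
 * reports the RTE_EVENT_DEV_CAP_EVENT_QOS capability.
 *
 * @code{.c}
 * struct rte_event ev;
 *
 * ev.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
 * @endcode
 */
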
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global dequeue timeout with a per-dequeue timeout. */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_TYPE_MASK      (3ULL << 0)
/**< Mask for the event queue schedule type configuration request. */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES      (0ULL << 0)
/**< Allow ATOMIC, ORDERED and PARALLEL schedule type enqueue. */
#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY    (1ULL << 0)
/**< Allow only ATOMIC schedule type enqueue. */
#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY   (2ULL << 0)
/**< Allow only ORDERED schedule type enqueue. */
#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY  (3ULL << 0)
/**< Allow only PARALLEL schedule type enqueue. */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK    (1ULL << 2)
/**< This event queue links only to a single event port. */

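/*
 * Illustrative sketch (assumes the rte_event_queue_setup() API and the
 * rte_event_queue_conf structure from this library): configure queue 0
 * to accept only ATOMIC schedule type events.
 *
 * @code{.c}
 * struct rte_event_queue_conf conf = {
 *	.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
 *	.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	.nb_atomic_flows = 1024,
 * };
 *
 * rte_event_queue_setup(dev_id, 0, &conf);
 * @endcode
 */
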
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED          0
/**< Ordered: events of a flow may be processed in parallel, but the
 * original ingress order is restored on the next enqueue.
 */
#define RTE_SCHED_TYPE_ATOMIC           1
/**< Atomic: events of a flow are scheduled to only one port at a time,
 * providing atomicity and preserving flow order.
 */
#define RTE_SCHED_TYPE_PARALLEL         2
/**< Parallel: events are scheduled with no ordering or atomicity guarantees. */

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV           0x0
/**< The event is generated from the ethdev subsystem. */
#define RTE_EVENT_TYPE_CRYPTODEV        0x1
/**< The event is generated from the cryptodev subsystem. */
#define RTE_EVENT_TYPE_TIMERDEV         0x2
/**< The event is generated from the timerdev subsystem. */
#define RTE_EVENT_TYPE_CPU              0x3
/**< The event is generated from the CPU by the application. */
#define RTE_EVENT_TYPE_MAX              0x10
/**< Maximum number of event types. */

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW                0
/**< Inject a new event into the event device. */
#define RTE_EVENT_OP_FORWARD            1
/**< Re-enqueue a previously dequeued event, e.g. to the next pipeline stage. */
#define RTE_EVENT_OP_RELEASE            2
/**< Release the flow context acquired on dequeue without forwarding the
 * event (e.g. drop an atomic flow lock).
 */

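/*
 * Illustrative sketch tying the above definitions together: a producer
 * injects a new mbuf event with atomic scheduling on queue 0 ('m',
 * 'dev_id' and 'port_id' are assumed application variables; the enqueue
 * function is defined later in this file).
 *
 * @code{.c}
 * struct rte_event ev = {
 *	.op = RTE_EVENT_OP_NEW,
 *	.queue_id = 0,
 *	.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	.event_type = RTE_EVENT_TYPE_ETHDEV,
 *	.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	.mbuf = m,
 * };
 *
 * rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 * @endcode
 */
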
struct rte_eventdev_driver;
struct rte_eventdev;

typedef void (*event_schedule_t)(struct rte_eventdev *dev);
/**< @internal Schedule one or more events in the event dev. */

typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
/**< @internal Enqueue event on port of a device */

typedef uint16_t (*event_enqueue_burst_t)(void *port,
		const struct rte_event ev[], uint16_t nb_events);
/**< @internal Enqueue burst of events on port of a device */

typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);
/**< @internal Dequeue event from port of a device */

typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);
/**< @internal Dequeue burst of events from port of a device */

#define RTE_EVENTDEV_NAME_MAX_LEN (64)
/**< @internal Max length of name of event PMD */

/** @internal The data part, with no function pointers, associated with
 * each device.
 */
struct rte_eventdev_data {
	/* ... device id, socket and queue/port bookkeeping fields ... */
	uint8_t *ports_dequeue_depth;	/**< Array of port dequeue depth. */
	uint8_t *ports_enqueue_depth;	/**< Array of port enqueue depth. */
	uint8_t *queues_prio;		/**< Array of queue priority. */
	uint16_t *links_map;		/**< Queue-to-port link connections. */
	uint32_t event_dev_cap;		/**< RTE_EVENT_DEV_CAP_* flags. */
	uint8_t dev_started : 1;	/**< Device state: STARTED(1)/STOPPED(0). */
	char name[RTE_EVENTDEV_NAME_MAX_LEN];	/**< Unique identifier name. */
} __rte_cache_aligned;

/** @internal The data structure associated with each event device. */
struct rte_eventdev {
	event_schedule_t schedule;	/**< PMD schedule function. */
	event_enqueue_t enqueue;	/**< PMD enqueue function. */
	event_enqueue_burst_t enqueue_burst;	/**< PMD enqueue burst. */
	event_enqueue_burst_t enqueue_new_burst;	/**< OP_NEW variant. */
	event_enqueue_burst_t enqueue_forward_burst;	/**< OP_FORWARD variant. */
	event_dequeue_t dequeue;	/**< PMD dequeue function. */
	event_dequeue_burst_t dequeue_burst;	/**< PMD dequeue burst. */
	struct rte_eventdev_data *data;	/**< Pointer to device data. */
	/* ... ops table and driver fields ... */
	uint8_t attached : 1;	/**< Flag indicating the device is attached. */
} __rte_cache_aligned;

extern struct rte_eventdev *rte_eventdevs;
/**< @internal The pool of rte_eventdev structures. */

/** Trigger the scheduler; needed when the device does not report the
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability.
 */
static inline void
rte_event_schedule(uint8_t dev_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (*dev->schedule)
		(*dev->schedule)(dev);
}

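/*
 * Illustrative sketch: on devices without
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED, one lcore typically drives the
 * scheduler in a loop ('done' is a hypothetical application flag).
 *
 * @code{.c}
 * while (!done)
 *	rte_event_schedule(dev_id);
 * @endcode
 */
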
static inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events,
			const event_enqueue_burst_t fn)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}
	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	/* Use the single-event path when the burst degenerates to one. */
	if (nb_events == 1)
		return (*dev->enqueue)(dev->data->ports[port_id], ev);
	else
		return fn(dev->data->ports[port_id], ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_burst);
}

static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
			const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			dev->enqueue_forward_burst);
}

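/*
 * Illustrative sketch: when every event in a burst carries the same
 * operation, these specialized entry points let a PMD skip per-event op
 * checks. A pure producer might retry on backpressure as below (the
 * retry policy is an application choice, not mandated by the API):
 *
 * @code{.c}
 * uint16_t sent = rte_event_enqueue_new_burst(dev_id, port_id, evs, n);
 *
 * while (sent < n)
 *	sent += rte_event_enqueue_new_burst(dev_id, port_id,
 *					    evs + sent, n - sent);
 * @endcode
 */
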
/**
 * Convert a dequeue wait time in nanoseconds to device-specific
 * timeout ticks for use with rte_event_dequeue_burst().
 */
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks);

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
			uint16_t nb_events, uint64_t timeout_ticks)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
		rte_errno = -EINVAL;
		return 0;
	}
	if (port_id >= dev->data->nb_ports) {
		rte_errno = -EINVAL;
		return 0;
	}
#endif
	/* Use the single-event path when the burst degenerates to one. */
	if (nb_events == 1)
		return (*dev->dequeue)(
			dev->data->ports[port_id], ev, timeout_ticks);
	else
		return (*dev->dequeue_burst)(
			dev->data->ports[port_id], ev, nb_events,
			timeout_ticks);
}

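/*
 * Illustrative worker-loop sketch ('done' and the per-event processing
 * are hypothetical application code):
 *
 * @code{.c}
 * struct rte_event evs[32];
 * uint64_t ticks;
 *
 * rte_event_dequeue_timeout_ticks(dev_id, 100000, &ticks); // 100 us
 * while (!done) {
 *	uint16_t i, nb;
 *
 *	nb = rte_event_dequeue_burst(dev_id, port_id, evs, 32, ticks);
 *	for (i = 0; i < nb; i++) {
 *		// ... process evs[i], then mark it for the next stage
 *		evs[i].op = RTE_EVENT_OP_FORWARD;
 *	}
 *	rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 * }
 * @endcode
 */
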
/**
 * Link event queues to an event port, optionally with a per-link
 * service priority.
 */
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links);

/**
 * Unlink event queues from an event port.
 */
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks);

/**
 * Retrieve the queues and priorities currently linked to an event port.
 */
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[]);

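/*
 * Illustrative sketch: link a worker port to two queues, servicing
 * queue 0 at a higher priority. Passing NULL priorities instead applies
 * RTE_EVENT_DEV_PRIORITY_NORMAL to every link.
 *
 * @code{.c}
 * uint8_t queues[] = {0, 1};
 * uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *		      RTE_EVENT_DEV_PRIORITY_NORMAL};
 *
 * rte_event_port_link(dev_id, port_id, queues, prios, 2);
 * @endcode
 */
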
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
/**< Maximum name length of an extended statistic counter. */

/** Selects the component of the eventdev that extended statistics apply to. */
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,	/**< Device-level statistics. */
	RTE_EVENT_DEV_XSTATS_PORT,	/**< Per-port statistics. */
	RTE_EVENT_DEV_XSTATS_QUEUE,	/**< Per-queue statistics. */
};

/**
 * Retrieve the names and ids of the extended statistics available for
 * the given mode and queue/port.
 */
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
			       enum rte_event_dev_xstats_mode mode,
			       uint8_t queue_port_id,
			       struct rte_event_dev_xstats_name *xstats_names,
			       unsigned int *ids, unsigned int size);

/**
 * Retrieve the values of the extended statistics identified by *ids*.
 */
int
rte_event_dev_xstats_get(uint8_t dev_id,
			 enum rte_event_dev_xstats_mode mode,
			 uint8_t queue_port_id,
			 const unsigned int ids[],
			 uint64_t values[], unsigned int n);

/**
 * Reset the values of the selected extended statistics.
 */
int
rte_event_dev_xstats_reset(uint8_t dev_id,
			   enum rte_event_dev_xstats_mode mode,
			   int16_t queue_port_id,
			   const uint32_t ids[], uint32_t nb_ids);
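
/*
 * Illustrative sketch (array sizes are arbitrary): fetch the names of
 * the device-level counters, then read their current values.
 *
 * @code{.c}
 * struct rte_event_dev_xstats_name names[128];
 * unsigned int ids[128];
 * uint64_t values[128];
 * int n;
 *
 * n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *				      0, names, ids, 128);
 * if (n > 0)
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *				 0, ids, values, n);
 * @endcode
 */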