35 #ifndef _RTE_EVENTDEV_H_
36 #define _RTE_EVENTDEV_H_
242 #include <rte_config.h>
/* Event device capability bit-flags, as reported by the driver
 * (bitmask of the RTE_EVENT_DEV_CAP_* values below).
 */

/** Device supports per-event-queue quality of service (queue priority). */
249 #define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
/** Device honours the priority carried in each individual event. */
255 #define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
/** Scheduling is distributed (no dedicated scheduler core required). */
262 #define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
/** A single queue can service events of all schedule types
 * (see RTE_EVENT_QUEUE_CFG_ALL_TYPES).
 */
271 #define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/** Device natively supports burst enqueue/dequeue operations. */
278 #define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/** Ports can be configured to NOT implicitly release the previous
 * event when dequeuing a new one.
 */
286 #define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/** Device works in non-sequential mode (relaxed ordering between stages). */
297 #define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/** Port-to-queue links may be established/changed while the device
 * is running (after rte_event_dev_start()).
 */
307 #define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/** More than one event queue may be linked to a single event port. */
313 #define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

/* Normalized priority range: numerically LOWER value = HIGHER priority. */

/** Highest priority expressed across eventdev subsystem. */
320 #define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/** Normal (default) priority. */
325 #define RTE_EVENT_DEV_PRIORITY_NORMAL 128
/** Lowest priority expressed across eventdev subsystem. */
330 #define RTE_EVENT_DEV_PRIORITY_LOWEST 255
/* Attribute identifiers for the device-level attribute getter
 * (the rte_event_dev_attr_get() prototype is elided here; only its
 * trailing out-parameter line is visible below).
 */

/** Attribute id: number of event ports on this device. */
438 #define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/** Attribute id: number of event queues on this device. */
442 #define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/** Attribute id: whether the device has been started (non-zero = started). */
446 #define RTE_EVENT_DEV_ATTR_STARTED 2
/* Tail of the attr-get prototype: attribute value is written through
 * this out pointer. Head of the declaration is elided from this view.
 */
462 uint32_t *attr_value);
/** Device configuration flag: use a per-dequeue timeout instead of a
 * global dequeue timeout (passed via rte_event_dev_configure()).
 */
466 #define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/* Event queue configuration flags and attribute identifiers. */

/** Queue accepts events of all schedule types
 * (requires RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES).
 */
551 #define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/** Queue is dedicated to a single linked event port (single-link). */
557 #define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

/* Attribute identifiers for the queue attribute getter (prototype head
 * elided; only its trailing out-parameter line is visible below).
 */

/** Attribute id: queue priority. */
655 #define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/** Attribute id: number of atomic flows supported by the queue. */
659 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/** Attribute id: number of atomic-order sequences supported by the queue. */
663 #define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/** Attribute id: the queue's event_queue_cfg flags. */
667 #define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/** Attribute id: the queue's schedule type (RTE_SCHED_TYPE_*). */
671 #define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
/* Tail of the queue attr-get prototype; head elided from this view. */
695 uint32_t *attr_value);
/* Attribute identifiers for the port attribute getter (prototype head
 * elided; only its trailing out-parameter line is visible below).
 */

/** Attribute id: maximum enqueue burst depth of the port. */
788 #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
/** Attribute id: maximum dequeue burst depth of the port. */
792 #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
/** Attribute id: new-event threshold (backpressure limit for OP_NEW events). */
796 #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
/* Tail of the port attr-get prototype; head elided from this view. */
816 uint32_t *attr_value);
/* Per-flow scheduling types. */

/** Ordered: events of a flow may be processed in parallel on multiple
 * ports, but original ingress order is restored when they are enqueued
 * to the next stage.
 */
862 #define RTE_SCHED_TYPE_ORDERED 0
/** Atomic: at most one event of a given flow is scheduled at a time,
 * giving per-flow exclusivity and ordering.
 */
889 #define RTE_SCHED_TYPE_ATOMIC 1
/** Parallel: no ordering or atomicity guarantees; maximum parallelism. */
908 #define RTE_SCHED_TYPE_PARALLEL 2

/* Event source types (values for the event's event_type field). */

/** Event originates from an ethdev (NIC) receive queue. */
922 #define RTE_EVENT_TYPE_ETHDEV 0x0
/** Event originates from a cryptodev. */
924 #define RTE_EVENT_TYPE_CRYPTODEV 0x1
/** Event originates from a timer device. */
926 #define RTE_EVENT_TYPE_TIMERDEV 0x2
/** Event generated by a CPU (application). */
928 #define RTE_EVENT_TYPE_CPU 0x3
/** Event injected by the ethdev Rx adapter. */
932 #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
/** Exclusive upper bound of valid event-type values. */
934 #define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations (values for the event's op field). */

/** Inject a brand-new event into the device. */
938 #define RTE_EVENT_OP_NEW 0
/** Forward a previously dequeued event to its next stage. */
942 #define RTE_EVENT_OP_FORWARD 1
/** Release the scheduler context/credit held by a previously
 * dequeued event without forwarding it.
 */
950 #define RTE_EVENT_OP_RELEASE 2

/* Ethdev Rx adapter capability flags. */

/** Adapter uses an internal HW port to inject packets (no service core
 * needed for the Rx->event path).
 */
1060 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
/** Adapter can map Rx queues of one ethdev to multiple event queues. */
1064 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
/** Application may override the flow id of adapter-generated events. */
1068 #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

/* Forward declaration for the driver callback typedefs below. */
1100 struct rte_eventdev;
/**
 * @internal Driver callback: enqueue a single event to an event port.
 *
 * @param port  Driver-private port handle.
 * @param ev    Event to enqueue.
 * @return Number of events enqueued (0 on backpressure, 1 on success)
 *   — NOTE(review): return semantics inferred from the burst variants; confirm.
 */
typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
1105 typedef uint16_t (*event_enqueue_burst_t)(
void *port,
1106 const struct rte_event ev[], uint16_t nb_events);
/**
 * @internal Driver callback: dequeue a single event from an event port.
 *
 * @param port           Driver-private port handle.
 * @param ev             Output slot for the dequeued event.
 * @param timeout_ticks  Wait budget in device-specific ticks.
 * @return Number of events dequeued (0 or 1).
 */
typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);
1113 typedef uint16_t (*event_dequeue_burst_t)(
void *port,
struct rte_event ev[],
1114 uint16_t nb_events, uint64_t timeout_ticks);
/** Maximum length of an event device name string. */
1117 #define RTE_EVENTDEV_NAME_MAX_LEN (64)

/**
 * @internal Per-device data.
 * NOTE(review): several members and the closing brace of this struct are
 * elided from this view; only the fields below are visible.
 */
1127 struct rte_eventdev_data {
	/* Per-port queue-link map — presumably indexed by port then queue;
	 * TODO confirm layout against the driver code.
	 */
1142 uint16_t *links_map;
	/* Device capabilities (bitmask of RTE_EVENT_DEV_CAP_* flags). */
1146 uint32_t event_dev_cap;
	/* Non-zero once the device's software service has been registered. */
1150 uint8_t service_inited;
	/* Service id of the device's scheduling service (valid only when
	 * service_inited is set).
	 */
1152 uint32_t service_id;
	/* Device state: set while the device is started. */
1156 uint8_t dev_started : 1;
	/* Unique device identifier name. */
1159 char name[RTE_EVENTDEV_NAME_MAX_LEN];
/**
 * @internal The generic per-device structure. Fast-path function pointers
 * come first; the closing brace/qualifiers of this struct are elided here.
 */
1164 struct rte_eventdev {
	/* Single-event enqueue (used for the nb_events == 1 fast path). */
1165 event_enqueue_t enqueue;
	/* Burst enqueue of events with mixed op types. */
1167 event_enqueue_burst_t enqueue_burst;
	/* Burst enqueue where every event is RTE_EVENT_OP_NEW. */
1169 event_enqueue_burst_t enqueue_new_burst;
	/* Burst enqueue where every event is RTE_EVENT_OP_FORWARD. */
1171 event_enqueue_burst_t enqueue_forward_burst;
	/* Single-event dequeue. */
1173 event_dequeue_t dequeue;
	/* Burst dequeue of events. */
1175 event_dequeue_burst_t dequeue_burst;
	/* Pointer to the device's shared data. */
1178 struct rte_eventdev_data *data;
	/* Set when this slot in the global device array is in use. */
1186 uint8_t attached : 1;
/** @internal Global array of all event devices, indexed by device id. */
1190 extern struct rte_eventdev *rte_eventdevs;

/*
 * @internal Common helper behind the public enqueue wrappers: validates
 * (in debug builds) and dispatches to the driver enqueue function @p fn.
 * NOTE(review): the function head, braces, debug-branch bodies, #endif and
 * the condition selecting the single-event path are elided from this view.
 */
1194 __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
1195 const struct rte_event ev[], uint16_t nb_events,
1196 const event_enqueue_burst_t fn)
1198 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	/* Parameter validation is compiled in only for debug builds. */
1200 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1201 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1206 if (port_id >= dev->data->nb_ports) {
	/* Single-event fast path: use the non-burst driver callback
	 * (presumably guarded by an elided nb_events == 1 check — confirm).
	 */
1216 return (*dev->enqueue)(dev->data->ports[port_id], ev);
	/* General path: burst enqueue through the caller-selected callback. */
1218 return fn(dev->data->ports[port_id], ev, nb_events);
/*
 * Public wrapper: enqueue a burst of events of mixed op types.
 * NOTE(review): the line carrying this wrapper's name and first parameters
 * is elided from this view.
 */
1263 static inline uint16_t
1265 const struct rte_event ev[], uint16_t nb_events)
1267 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	/* Dispatch via the generic mixed-op burst callback. */
1269 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1270 dev->enqueue_burst);
/*
 * Public wrapper: enqueue a burst in which every event is
 * RTE_EVENT_OP_NEW, letting the driver use its specialized path.
 * NOTE(review): the line carrying this wrapper's name is elided.
 */
1312 static inline uint16_t
1314 const struct rte_event ev[], uint16_t nb_events)
1316 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	/* Dispatch via the all-OP_NEW burst callback. */
1318 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1319 dev->enqueue_new_burst);
/*
 * Public wrapper: enqueue a burst in which every event is
 * RTE_EVENT_OP_FORWARD, letting the driver use its specialized path.
 * NOTE(review): the line carrying this wrapper's name is elided.
 */
1361 static inline uint16_t
1363 const struct rte_event ev[], uint16_t nb_events)
1365 const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	/* Dispatch via the all-OP_FORWARD burst callback. */
1367 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
1368 dev->enqueue_forward_burst);
/* Tail of a prototype converting a dequeue timeout to device ticks
 * (presumably rte_event_dequeue_timeout_ticks(); head elided — confirm).
 */
1398 uint64_t *timeout_ticks);

/*
 * Public dequeue: receive up to nb_events events on a port.
 * NOTE(review): the function name line, braces, debug-branch bodies,
 * #endif, the nb_events == 1 condition and the final argument line of the
 * burst call are elided from this view.
 */
1466 static inline uint16_t
1468 uint16_t nb_events, uint64_t timeout_ticks)
1470 struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	/* Parameter validation is compiled in only for debug builds. */
1472 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1473 if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
1478 if (port_id >= dev->data->nb_ports) {
	/* Single-event fast path via the non-burst driver callback. */
1489 return (*dev->dequeue)(
1490 dev->data->ports[port_id], ev, timeout_ticks);
	/* General path: burst dequeue through the driver callback. */
1492 return (*dev->dequeue_burst)(
1493 dev->data->ports[port_id], ev, nb_events,
/* Fragments of the port link management prototypes (heads elided):
 * - link:       takes arrays of queue ids and per-link priorities;
 * - unlink:     takes an array of queue ids and a count to unlink;
 * - links_get:  fills arrays of linked queue ids and their priorities.
 */
1559 const uint8_t queues[],
const uint8_t priorities[],
1603 uint8_t queues[], uint16_t nb_unlinks);
1634 uint8_t queues[], uint8_t priorities[]);
/** Maximum length of an extended-statistic (xstat) name string. */
1671 #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

/* Selector values of the xstats mode enum (enum head/closing elided):
 * statistics can be queried at device, port, or queue granularity.
 */
1677 RTE_EVENT_DEV_XSTATS_DEVICE,
1678 RTE_EVENT_DEV_XSTATS_PORT,
1679 RTE_EVENT_DEV_XSTATS_QUEUE,

/* Fragments of the xstats prototypes (heads elided):
 * - names_get: queue_port_id selects the port/queue within the mode;
 * - get:       reads values for the given stat ids into values[];
 * - reset:     queue_port_id is signed here (presumably -1 = all — confirm)
 *              and ids selects which counters to reset.
 */
1727 uint8_t queue_port_id,
1761 uint8_t queue_port_id,
1762 const unsigned int ids[],
1763 uint64_t values[],
unsigned int n);
1808 int16_t queue_port_id,
1809 const uint32_t ids[],