#ifndef __RTE_PMD_DPAA2_QDMA_H__
#define __RTE_PMD_DPAA2_QDMA_H__

#include <rte_rawdev.h>

/**
 * @file
 *
 * NXP DPAA2 QDMA specific structures.
 */
/** Maximum number of jobs that can be enqueued/dequeued in one burst. */
#define RTE_QDMA_BURST_NB_MAX 256
/** Determines the format of the Frame Descriptor (FD). */
enum {
	RTE_QDMA_LONG_FORMAT,
	RTE_QDMA_ULTRASHORT_FORMAT,
};
/** If the device runs in virtual queue mode but this particular VQ needs an
 * exclusively associated H/W queue (for better performance on that VQ),
 * pass this flag when setting up the virtual queue.
 */
#define RTE_QDMA_VQ_EXCLUSIVE_PQ	(1ULL)

/** Use the long Frame Descriptor format on this virtual queue. */
#define RTE_QDMA_VQ_FD_LONG_FORMAT	(1ULL << 1)

/** Use the scatter-gather Frame Descriptor format on this virtual queue. */
#define RTE_QDMA_VQ_FD_SG_FORMAT	(1ULL << 2)

/** Do not generate a DMA response for jobs enqueued on this virtual queue. */
#define RTE_QDMA_VQ_NO_RESPONSE		(1ULL << 3)

/** States that the source address is physical. */
#define RTE_QDMA_JOB_SRC_PHY		(1ULL)

/** States that the destination address is physical. */
#define RTE_QDMA_JOB_DEST_PHY		(1ULL << 1)
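
/*
 * A minimal sketch of how the two job flags above are typically combined.
 * It assumes the struct rte_qdma_job layout (src, dest, len, flags) of the
 * full driver header; the variable names are illustrative only.
 *
 *	struct rte_qdma_job job = {
 *		.src   = src_iova,	// IOVA/physical source address
 *		.dest  = dst_iova,	// IOVA/physical destination address
 *		.len   = length,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 */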
/** Route-by-port (RBP) configuration for a virtual queue. */
struct rte_qdma_rbp {
	/** Use the ultra-short FD format for route-by-port jobs. */
	uint32_t use_ultrashort:1;
	/** Enable route-by-port for this virtual queue. */
	uint32_t enable:1;
	/** Destination port, PF and VF ids; route-by-port on destination. */
	uint32_t dportid:4;
	uint32_t dpfid:2;
	uint32_t dvfid:6;
	uint32_t drbp:1;
	/** Source port, PF and VF ids; route-by-port on source. */
	uint32_t sportid:4;
	uint32_t spfid:2;
	uint32_t svfid:6;
	uint32_t srbp:1;
	/** Reserved. */
	uint32_t rsv:4;
};
/** Per-virtual-queue statistics. */
struct rte_qdma_vq_stats {
	/** States whether this VQ has an exclusively associated H/W queue. */
	uint8_t exclusive_hw_queue;
	/** Associated lcore id. */
	uint32_t lcore_id;
	/** Total number of enqueues on this VQ. */
	uint64_t num_enqueues;
	/** Total number of dequeues from this VQ. */
	uint64_t num_dequeues;
	/** Total number of jobs pending on this VQ. */
	uint64_t num_pending_jobs;
};
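
/*
 * A short sketch of reading these counters through the rte_qdma_vq_stats()
 * helper declared at the end of this header; it assumes the caller already
 * holds the struct rte_rawdev pointer of the QDMA device.
 *
 *	struct rte_qdma_vq_stats stats;
 *
 *	rte_qdma_vq_stats(rawdev, vq_id, &stats);
 *	printf("enq=%" PRIu64 " deq=%" PRIu64 " pending=%" PRIu64 "\n",
 *	       stats.num_enqueues, stats.num_dequeues,
 *	       stats.num_pending_jobs);
 */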
/** Enqueue/dequeue context passed to the rawdev enqueue/dequeue calls. */
struct rte_qdma_enqdeq {
	/** Virtual queue to enqueue to / dequeue from. */
	uint16_t vq_id;
	/** Array of QDMA job pointers to submit, or to fill on dequeue. */
	struct rte_qdma_job **job;
};
/** Virtual queue configuration, passed at queue setup time. */
struct rte_qdma_queue_config {
	/** lcore from which this virtual queue will be used. */
	uint32_t lcore_id;
	/** RTE_QDMA_VQ_* flags. */
	uint32_t flags;
	/** Optional route-by-port configuration (may be NULL). */
	struct rte_qdma_rbp *rbp;
};
/** The QDMA control path is a thin wrapper over the generic rawdev API. */
#define rte_qdma_info rte_rawdev_info
#define rte_qdma_start(id) rte_rawdev_start(id)
#define rte_qdma_reset(id) rte_rawdev_reset(id)
#define rte_qdma_configure(id, cf) rte_rawdev_configure(id, cf)
#define rte_qdma_dequeue_buffers(id, buf, num, ctxt) \
	rte_rawdev_dequeue_buffers(id, buf, num, ctxt)
#define rte_qdma_enqueue_buffers(id, buf, num, ctxt) \
	rte_rawdev_enqueue_buffers(id, buf, num, ctxt)
#define rte_qdma_queue_setup(id, qid, cfg) \
	rte_rawdev_queue_setup(id, qid, cfg)
/** QDMA device configuration, passed to rte_qdma_configure() through the
 * dev_private field of struct rte_qdma_info.
 */
struct rte_qdma_config {
	/** Maximum number of H/W queues to allocate per lcore. */
	uint16_t max_hw_queues_per_core;
	/** Maximum number of virtual queues to be used. */
	uint16_t max_vqs;
};
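
/*
 * A minimal usage sketch of the rawdev wrappers above. It assumes a
 * dpaa2_qdma rawdev has already been probed as raw device 'id'; the
 * configuration values, the queue id argument and the lack of error
 * handling are illustrative only.
 *
 *	struct rte_qdma_config qdma_config = { .max_hw_queues_per_core = 1 };
 *	struct rte_qdma_info info = { .dev_private = &qdma_config };
 *	struct rte_qdma_queue_config q_config = {
 *		.lcore_id = rte_lcore_id(),
 *		.flags = RTE_QDMA_VQ_EXCLUSIVE_PQ,
 *	};
 *	int vq_id;
 *
 *	rte_qdma_configure(id, &info);
 *	rte_qdma_start(id);
 *	vq_id = rte_qdma_queue_setup(id, 0, &q_config);
 *
 *	// Jobs travel in the enqueue/dequeue context, not in the 'buf' arg.
 *	struct rte_qdma_enqdeq ctxt = { .vq_id = vq_id, .job = jobs };
 *	rte_qdma_enqueue_buffers(id, NULL, nb_jobs, &ctxt);
 *	rte_qdma_dequeue_buffers(id, NULL, nb_jobs, &ctxt);
 */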
/**
 * Get a virtual queue's statistics.
 *
 * @param rawdev
 *   Raw device on which the virtual queue was set up.
 * @param vq_id
 *   Virtual queue id.
 * @param vq_stats
 *   Pointer to the statistics structure to be filled.
 */
void
rte_qdma_vq_stats(struct rte_rawdev *rawdev, uint16_t vq_id,
		  struct rte_qdma_vq_stats *vq_stats);

#endif /* __RTE_PMD_DPAA2_QDMA_H__ */