35 #include <rte_compat.h>
43 #ifndef RTE_BBDEV_MAX_DEVS
44 #define RTE_BBDEV_MAX_DEVS 128
/**
 * Iterate over all attached bbdev device ids.
 * Starts from rte_bbdev_find_next(-1) (the first device) and advances with
 * rte_bbdev_find_next(i); the loop terminates once the returned id reaches
 * RTE_BBDEV_MAX_DEVS (the sentinel for "no further device").
 * @param i  iteration variable of an integer type wide enough for a device id
 *           (evaluated and assigned multiple times — pass a plain lvalue).
 */
91 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
92 i < RTE_BBDEV_MAX_DEVS; \
93 i = rte_bbdev_find_next(i))
/**
 * Sentinel entry terminating a driver's capabilities array.
 * Expands to an aggregate initializer whose first (op-type) field is
 * RTE_BBDEV_OP_NONE, which consumers use as the end-of-list marker.
 */
322 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
323 { RTE_BBDEV_OP_NONE }
384 struct rte_bbdev_queue_data {
392 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
393 struct rte_bbdev_queue_data *q_data,
398 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
399 struct rte_bbdev_queue_data *q_data,
404 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
405 struct rte_bbdev_queue_data *q_data,
409 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
410 struct rte_bbdev_queue_data *q_data,
/** Maximum length of a bbdev device name string (NOTE(review): whether this
 *  count includes the terminating NUL is not visible here — confirm against
 *  the name-field declaration in struct rte_bbdev_data). */
413 #define RTE_BBDEV_NAME_MAX_LEN 64
421 struct rte_bbdev_data {
425 struct rte_bbdev_queue_data *queues;
435 struct rte_bbdev_callback;
/** Declares struct rte_bbdev_cb_list: a sys/queue.h tail-queue head whose
 *  entries are struct rte_bbdev_callback nodes (forward-declared above). */
439 TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
447 rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
449 rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
451 rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
453 rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
455 rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
457 rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
459 rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
461 rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
463 struct rte_bbdev_data *data;
467 struct rte_bbdev_cb_list list_cbs;
/** Global table of bbdev devices, indexed directly by device id — the inline
 *  enqueue/dequeue wrappers below read &rte_bbdev_devices[dev_id] with no
 *  bounds check, so callers must pass a valid id (< RTE_BBDEV_MAX_DEVS). */
472 extern struct rte_bbdev rte_bbdev_devices[];
496 static inline uint16_t
500 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
501 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
502 return dev->enqueue_enc_ops(q_data, ops, num_ops);
527 static inline uint16_t
531 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
532 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
533 return dev->enqueue_dec_ops(q_data, ops, num_ops);
558 static inline uint16_t
562 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
563 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
564 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
589 static inline uint16_t
593 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
594 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
595 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
622 static inline uint16_t
626 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
627 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
628 return dev->dequeue_enc_ops(q_data, ops, num_ops);
655 static inline uint16_t
659 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
660 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
661 return dev->dequeue_dec_ops(q_data, ops, num_ops);
687 static inline uint16_t
691 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
692 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
693 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
718 static inline uint16_t
722 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
723 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
724 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
bool hardware_accelerated
uint64_t acc_offload_cycles
__rte_experimental int rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
__rte_experimental int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback)
__rte_experimental int rte_bbdev_intr_enable(uint16_t dev_id)
uint64_t dequeue_err_count
__rte_experimental int rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
uint32_t harq_buffer_size
uint8_t max_dl_queue_priority
static __rte_experimental uint16_t rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
struct rte_bbdev_driver_info drv
__rte_experimental int rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
static __rte_experimental uint16_t rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
unsigned int max_num_queues
enum rte_bbdev_op_type op_type
__rte_experimental int rte_bbdev_stats_reset(uint16_t dev_id)
static __rte_experimental uint16_t rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
__rte_experimental int rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_queue_info *queue_info)
__rte_experimental int rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_bbdev_close(uint16_t dev_id)
static __rte_experimental uint16_t rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
__rte_experimental int rte_bbdev_stop(uint16_t dev_id)
__rte_experimental int rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
#define RTE_BBDEV_NAME_MAX_LEN
#define __rte_cache_aligned
struct rte_bbdev_queue_conf conf
__rte_experimental int rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, const struct rte_bbdev_queue_conf *conf)
bool queue_intr_supported
__rte_experimental int rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, void *data)
__rte_experimental int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
__rte_experimental uint16_t rte_bbdev_find_next(uint16_t dev_id)
static __rte_experimental uint16_t rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
static __rte_experimental uint16_t rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
__rte_experimental uint16_t rte_bbdev_count(void)
const struct rte_device * device
__rte_experimental int rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
__rte_experimental int rte_bbdev_start(uint16_t dev_id)
static __rte_experimental uint16_t rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
__rte_experimental int rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
const struct rte_bbdev_op_cap * capabilities
static __rte_experimental uint16_t rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
struct rte_bbdev_queue_conf default_queue_conf
uint8_t max_ul_queue_priority
enum rte_cpu_flag_t * cpu_flag_reqs
__rte_experimental bool rte_bbdev_is_valid(uint16_t dev_id)
uint64_t enqueue_err_count
void(* rte_bbdev_cb_fn)(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg, void *ret_param)