#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
#endif

/** Iterate over all valid bbdev device IDs. */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))

/** Marks the end of a driver's capabilities array. */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

/** @internal Per-queue data kept by the library. */
struct rte_bbdev_queue_data {
	void *queue_private;                /**< Driver-specific queue data */
	struct rte_bbdev_queue_conf conf;   /**< Current configuration */
	struct rte_bbdev_stats queue_stats; /**< Queue statistics */
	bool started;                       /**< Queue state */
};
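For illustration, a minimal sketch of device discovery built on these macros, assuming rte_eal_init() has already run; it only prints, and configures nothing:

#include <stdio.h>
#include <rte_bbdev.h>

/* Walk every attached bbdev and print its name and driver limits. */
static void
list_bbdevs(void)
{
	struct rte_bbdev_info info;
	uint16_t dev_id;

	printf("%u bbdev device(s) attached\n", rte_bbdev_count());
	RTE_BBDEV_FOREACH(dev_id) {
		if (rte_bbdev_info_get(dev_id, &info) != 0)
			continue;
		printf("  dev %u: %s, max_num_queues %u\n", dev_id,
				info.dev_name, info.drv.max_num_queues);
	}
}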
/** @internal Enqueue encode operations for processing on a device queue. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on a device queue. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a device queue. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Dequeue decode operations from a device queue. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);
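These typedefs are the internal hooks a poll-mode driver installs in struct rte_bbdev at registration. A hedged sketch of what one such hook might look like; struct my_pmd_queue and my_pmd_submit are hypothetical stand-ins, not part of any real PMD:

#include <rte_bbdev.h>

/* Hypothetical per-queue ring state; purely illustrative. */
struct my_pmd_queue {
	uint16_t head;
	uint16_t tail;
	uint16_t size;
};

/* Hypothetical submit helper; a real PMD would write a hardware
 * descriptor here. */
static int
my_pmd_submit(struct my_pmd_queue *q, struct rte_bbdev_enc_op *op)
{
	(void)op;
	if ((uint16_t)(q->head - q->tail) >= q->size)
		return -1;	/* ring full */
	q->head++;
	return 0;
}

/* A burst function matching rte_bbdev_enqueue_enc_ops_t; the PMD would
 * store a pointer to it in the enqueue_enc_ops slot of struct rte_bbdev. */
static uint16_t
my_pmd_enqueue_enc(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num)
{
	struct my_pmd_queue *q = q_data->queue_private;
	uint16_t i;

	for (i = 0; i < num; i++)
		if (my_pmd_submit(q, ops[i]) != 0)
			break;	/* accept a partial burst */
	q_data->queue_stats.enqueued_count += i;
	return i;	/* number of ops actually queued */
}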
#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */

/** @internal The data associated with a device. */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN];   /**< Unique identifier name */
	void *dev_private;                   /**< Driver-specific private data */
	uint16_t num_queues;                 /**< Number of configured queues */
	struct rte_bbdev_queue_data *queues; /**< Queue structures */
	uint16_t dev_id;                     /**< Device ID */
	int socket_id;                       /**< NUMA socket the device is on */
	bool started;                        /**< Device state: started/stopped */
	uint16_t process_cnt;                /**< Counter of processes using the device */
};
/* Forward declarations */
struct rte_bbdev_callback;
struct rte_intr_handle;
/** @internal The data structure associated with a device. */
struct rte_bbdev {
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;      /**< Enqueue encode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;      /**< Enqueue decode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;      /**< Dequeue encode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;      /**< Dequeue decode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops; /**< Enqueue LDPC encode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops; /**< Enqueue LDPC decode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops; /**< Dequeue LDPC encode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops; /**< Dequeue LDPC decode function */
	struct rte_bbdev_data *data;                      /**< Pointer to device data */
	struct rte_bbdev_cb_list list_cbs;                /**< Registered user callbacks */
	struct rte_intr_handle *intr_handle;              /**< Device interrupt handle */
} __rte_cache_aligned;
/** @internal The global array of all baseband devices. */
extern struct rte_bbdev rte_bbdev_devices[];
/** Enqueue a burst of encode operations to a queue of a device. */
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}
/** Enqueue a burst of decode operations to a queue of a device. */
static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}
/** Enqueue a burst of LDPC encode operations to a queue of a device. */
static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
}
/** Enqueue a burst of LDPC decode operations to a queue of a device. */
static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
}
/** Dequeue a burst of processed encode operations from a queue of a device. */
static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}
/** Dequeue a burst of processed decode operations from a queue of a device. */
static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}
/** Dequeue a burst of processed LDPC encode operations from a queue of a device. */
static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
}
/** Dequeue a burst of processed LDPC decode operations from a queue of a device. */
static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
}
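These wrappers are the fast path: no locking and no validation, just a dispatch through the per-device function pointers. Below is a minimal polling sketch under stated assumptions: device 0 is started, queue 0 is configured for RTE_BBDEV_OP_TURBO_ENC, op_pool was created with rte_bbdev_op_pool_create(), and the per-op field setup is elided:

#include <rte_bbdev.h>
#include <rte_bbdev_op.h>

#define BURST_SIZE 32	/* illustrative burst size */

/* Push one burst of encode ops through queue 0 of device 0 and spin
 * until the device hands all of them back. */
static void
process_one_burst(struct rte_mempool *op_pool)
{
	struct rte_bbdev_enc_op *ops[BURST_SIZE];
	uint16_t enq = 0, deq = 0;

	if (rte_bbdev_enc_op_alloc_bulk(op_pool, ops, BURST_SIZE) != 0)
		return;

	/* ... fill each ops[i]->turbo_enc (input/output mbufs, CB params) ... */

	while (enq < BURST_SIZE)
		enq += rte_bbdev_enqueue_enc_ops(0, 0, &ops[enq],
				BURST_SIZE - enq);
	while (deq < BURST_SIZE)
		deq += rte_bbdev_dequeue_enc_ops(0, 0, &ops[deq],
				BURST_SIZE - deq);

	rte_bbdev_enc_op_free_bulk(ops, BURST_SIZE);
}

Error handling is deliberately coarse here; a real application would bound the spin loops and check each ops[i]->status after dequeue.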
Macros

#define RTE_BBDEV_NAME_MAX_LEN
#define __rte_cache_aligned
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback)

Typedefs

void (*rte_bbdev_cb_fn)(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg, void *ret_param)

Data Fields

unsigned int max_num_queues
bool hardware_accelerated
uint8_t max_dl_queue_priority
uint8_t max_ul_queue_priority
bool queue_intr_supported
uint32_t harq_buffer_size
enum rte_cpu_flag_t *cpu_flag_reqs
const struct rte_device *device
const struct rte_bbdev_op_cap *capabilities
enum rte_bbdev_op_type op_type
uint64_t enqueue_err_count
uint64_t dequeue_err_count
uint64_t acc_offload_cycles

Functions

uint16_t rte_bbdev_count(void)
bool rte_bbdev_is_valid(uint16_t dev_id)
uint16_t rte_bbdev_find_next(uint16_t dev_id)
int rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
int rte_bbdev_intr_enable(uint16_t dev_id)
int rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, const struct rte_bbdev_queue_conf *conf)
int rte_bbdev_start(uint16_t dev_id)
int rte_bbdev_stop(uint16_t dev_id)
int rte_bbdev_close(uint16_t dev_id)
int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
int rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
int rte_bbdev_stats_reset(uint16_t dev_id)
int rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
int rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_queue_info *queue_info)
static uint16_t rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
int rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
int rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
int rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
int rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
int rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, void *data)
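As a usage sketch of the lifecycle functions listed above: bring up a single encode queue on one device, reusing the driver's default queue configuration (the device ID and op type are illustrative choices):

#include <rte_bbdev.h>

/* Configure one queue on the device's own NUMA socket and start it. */
static int
setup_one_enc_queue(uint16_t dev_id)
{
	struct rte_bbdev_info info;
	struct rte_bbdev_queue_conf conf;
	int ret;

	ret = rte_bbdev_info_get(dev_id, &info);
	if (ret != 0)
		return ret;

	/* One queue, allocated on the device's NUMA socket. */
	ret = rte_bbdev_setup_queues(dev_id, 1, info.socket_id);
	if (ret != 0)
		return ret;

	/* Start from the driver's defaults, then pick the op type. */
	conf = info.drv.default_queue_conf;
	conf.op_type = RTE_BBDEV_OP_TURBO_ENC;
	ret = rte_bbdev_queue_configure(dev_id, 0, &conf);
	if (ret != 0)
		return ret;

	return rte_bbdev_start(dev_id);
}

After a successful start, the enqueue/dequeue wrappers become usable on queue 0, and rte_bbdev_stats_get() can be polled for the enqueued/dequeued and error counters.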