26 #include <rte_compat.h> 35 #ifndef RTE_BBDEV_MAX_DEVS 36 #define RTE_BBDEV_MAX_DEVS 128 44 #define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6 87 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \ 88 i < RTE_BBDEV_MAX_DEVS; \ 89 i = rte_bbdev_find_next(i)) 278 uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
327 unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
329 unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
362 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \ 363 { RTE_BBDEV_OP_NONE } 425 struct rte_bbdev_queue_data {
/*
 * @internal Driver fast-path entry points.
 * Each takes the queue's private data plus an array of operation pointers
 * and returns the number of operations actually processed by the driver.
 * (Parameter lists reconstructed to match the inline burst wrappers below
 * that invoke them with (q_data, ops, num_ops).)
 */

/** @internal Enqueue encode operations on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num_ops);

/** @internal Enqueue decode operations on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num_ops);

/** @internal Enqueue FFT operations on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops,
		uint16_t num_ops);

/** @internal Enqueue MLD-TS operations on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops,
		uint16_t num_ops);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num_ops);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num_ops);

/** @internal Dequeue FFT operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops,
		uint16_t num_ops);

/** @internal Dequeue MLD-TS operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops,
		uint16_t num_ops);
477 #define RTE_BBDEV_NAME_MAX_LEN 64 485 struct rte_bbdev_data { 489 struct rte_bbdev_queue_data *queues;
493 RTE_ATOMIC(uint16_t) process_cnt;
498 struct rte_bbdev_callback;
499 struct rte_intr_handle;
510 rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
512 rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
514 rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
516 rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
518 rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
520 rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
522 rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
524 rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
526 rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
528 rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
530 struct rte_bbdev_data *data;
532 struct rte_device *device;
534 struct rte_bbdev_cb_list list_cbs;
535 struct rte_intr_handle *intr_handle;
537 rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops;
539 rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops;
543 extern struct rte_bbdev rte_bbdev_devices[];
566 static inline uint16_t
570 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
571 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
572 return dev->enqueue_enc_ops(q_data, ops, num_ops);
596 static inline uint16_t
600 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
601 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
602 return dev->enqueue_dec_ops(q_data, ops, num_ops);
626 static inline uint16_t
630 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
631 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
632 return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
656 static inline uint16_t
660 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
661 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
662 return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
686 static inline uint16_t
690 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
691 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
692 return dev->enqueue_fft_ops(q_data, ops, num_ops);
716 static inline uint16_t
720 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
721 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
722 return dev->enqueue_mldts_ops(q_data, ops, num_ops);
747 static inline uint16_t
751 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
752 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
753 return dev->dequeue_enc_ops(q_data, ops, num_ops);
779 static inline uint16_t
783 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
784 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
785 return dev->dequeue_dec_ops(q_data, ops, num_ops);
810 static inline uint16_t
814 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
815 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
816 return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
840 static inline uint16_t
844 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
845 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
846 return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
870 static inline uint16_t
874 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
875 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
876 return dev->dequeue_fft_ops(q_data, ops, num_ops);
900 static inline uint16_t
904 struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
905 struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
906 return dev->dequeue_mldts_ops(q_data, ops, num_ops);
__rte_experimental int rte_bbdev_queue_ops_dump(uint16_t dev_id, uint16_t queue_index, FILE *file)
bool hardware_accelerated
uint64_t acc_offload_cycles
int rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id, const struct rte_bbdev_queue_conf *conf)
int rte_bbdev_intr_enable(uint16_t dev_id)
int rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op, void *data)
int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
static uint16_t rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback)
uint64_t dequeue_err_count
static uint16_t rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
uint32_t harq_buffer_size
uint8_t max_dl_queue_priority
static uint16_t rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
static uint16_t rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_fft_op **ops, uint16_t num_ops)
int rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_queue_info *queue_info)
#define __rte_cache_aligned
uint16_t rte_bbdev_find_next(uint16_t dev_id)
int rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
int rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
unsigned int max_num_queues
enum rte_bbdev_op_type op_type
int rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
int rte_bbdev_start(uint16_t dev_id)
uint16_t rte_bbdev_count(void)
const char * rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status)
uint16_t enqueue_depth_avail
static uint16_t rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
__rte_experimental char * rte_bbdev_ops_param_string(void *op, enum rte_bbdev_op_type op_type, char *str, uint32_t len)
bool rte_bbdev_is_valid(uint16_t dev_id)
static uint16_t rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_fft_op **ops, uint16_t num_ops)
uint64_t enqueue_warn_count
#define RTE_BBDEV_NAME_MAX_LEN
int rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
static uint16_t rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
uint64_t dequeue_warn_count
int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
bool queue_intr_supported
static uint16_t rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
int rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
static uint16_t rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_dec_op **ops, uint16_t num_ops)
uint16_t * fft_window_width
int rte_bbdev_stats_reset(uint16_t dev_id)
const struct rte_device * device
static uint16_t rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_enc_op **ops, uint16_t num_ops)
const struct rte_bbdev_op_cap * capabilities
int rte_bbdev_stop(uint16_t dev_id)
int rte_bbdev_close(uint16_t dev_id)
static uint16_t rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
int rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event, rte_bbdev_cb_fn cb_fn, void *cb_arg)
uint8_t max_ul_queue_priority
static uint16_t rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id, struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
enum rte_cpu_flag_t * cpu_flag_reqs
uint64_t enqueue_err_count
const char * rte_bbdev_device_status_str(enum rte_bbdev_device_status status)
void(* rte_bbdev_cb_fn)(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg, void *ret_param)
int rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)