DPDK  20.08.0
rte_cryptodev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include "rte_dev.h"
24 #include <rte_common.h>
25 #include <rte_config.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
/* Log at ERR level on the CRYPTODEV logtype, prefixing the message with the
 * calling function name and source line. Takes a printf-style format plus
 * optional arguments; RTE_FMT_HEAD/RTE_FMT_TAIL split the variadic list into
 * the format string and its arguments. A trailing newline is appended. */
33 #define CDEV_LOG_ERR(...) \
34  RTE_LOG(ERR, CRYPTODEV, \
35  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
/* Log at INFO level on the CRYPTODEV logtype; no function/line prefix. */
38 #define CDEV_LOG_INFO(...) \
39  RTE_LOG(INFO, CRYPTODEV, \
40  RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41  RTE_FMT_TAIL(__VA_ARGS__,)))
42 
/* Log at DEBUG level with function name and source line prefix. */
43 #define CDEV_LOG_DEBUG(...) \
44  RTE_LOG(DEBUG, CRYPTODEV, \
45  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
/* PMD trace helper: DEBUG-level log prefixed with a device name and the
 * calling function. NOTE(review): expands an identifier `dev` that must be
 * in scope at the expansion site — confirm callers provide it. */
48 #define CDEV_PMD_TRACE(...) \
49  RTE_LOG(DEBUG, CRYPTODEV, \
50  RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51  dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
/* Return a pointer of type t to the data at byte offset o from the start of
 * crypto operation c (virtual address arithmetic). */
66 #define rte_crypto_op_ctod_offset(c, t, o) \
67  ((t)((char *)(c) + (o)))
68 
/* Return the IO (physical) address at byte offset o from the crypto
 * operation's phys_addr field. */
80 #define rte_crypto_op_ctophys_offset(c, o) \
81  (rte_iova_t)((c)->phys_addr + (o))
82 
87  uint16_t min;
88  uint16_t max;
89  uint16_t increment;
95 };
96 
104  union {
105  struct {
108  uint16_t block_size;
118  } auth;
120  struct {
123  uint16_t block_size;
129  } cipher;
131  struct {
134  uint16_t block_size;
144  } aead;
145  };
146 };
147 
156  uint32_t op_types;
159  __extension__
160  union {
165  };
166 };
167 
174 };
175 
176 
183  union {
188  };
189 };
190 
193  enum rte_crypto_sym_xform_type type;
194  union {
195  enum rte_crypto_cipher_algorithm cipher;
196  enum rte_crypto_auth_algorithm auth;
197  enum rte_crypto_aead_algorithm aead;
198  } algo;
199 };
200 
209 };
210 
223  const struct rte_cryptodev_sym_capability_idx *idx);
224 
226 rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
227  const struct rte_cryptodev_sym_capability_idx *idx);
228 
230 rte_cryptodev_sym_capability_get(uint8_t dev_id,
231  const struct rte_cryptodev_sym_capability_idx *idx);
232 
243 __rte_experimental
245 rte_cryptodev_asym_capability_get(uint8_t dev_id,
246  const struct rte_cryptodev_asym_capability_idx *idx);
247 
260 int
262  const struct rte_cryptodev_symmetric_capability *capability,
263  uint16_t key_size, uint16_t iv_size);
264 
278 int
280  const struct rte_cryptodev_symmetric_capability *capability,
281  uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
282 
297 int
299  const struct rte_cryptodev_symmetric_capability *capability,
300  uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
301  uint16_t iv_size);
302 
313 __rte_experimental
314 int
316  const struct rte_cryptodev_asymmetric_xform_capability *capability,
317  enum rte_crypto_asym_op_type op_type);
318 
329 __rte_experimental
330 int
332  const struct rte_cryptodev_asymmetric_xform_capability *capability,
333  uint16_t modlen);
334 
346 int
348  const char *algo_string);
349 
361 int
363  const char *algo_string);
364 
376 int
378  const char *algo_string);
379 
391 __rte_experimental
392 int
394  const char *xform_string);
395 
396 
/* Sentinel terminating a PMD capabilities array: an entry whose op type is
 * RTE_CRYPTO_OP_TYPE_UNDEFINED marks the end of the list. */
398 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
399  { RTE_CRYPTO_OP_TYPE_UNDEFINED }
400 
401 
/* Crypto device feature flags, reported in rte_cryptodev_info.feature_flags
 * and maskable at configure time via ff_disable. Each flag is one bit of a
 * 64-bit mask. */
/* Symmetric crypto operations are supported. */
410 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
411 
/* Asymmetric (public-key) crypto operations are supported. */
412 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
413 
/* Chaining of symmetric crypto operations (e.g. cipher + auth) supported. */
414 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
415 
/* PMD utilises CPU SIMD SSE instructions. */
416 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
417 
/* PMD utilises CPU SIMD AVX instructions. */
418 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
419 
/* PMD utilises CPU SIMD AVX2 instructions. */
420 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
421 
/* PMD utilises CPU AES-NI instructions. */
422 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
423 
/* Operations are off-loaded to an external hardware accelerator. */
424 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
425 
/* PMD utilises CPU SIMD AVX512 instructions. */
428 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
429 
/* In-place scatter-gather (SGL) buffers with multiple segments supported. */
430 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
431 
/* Out-of-place: SGL input and SGL output supported. */
434 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
435 
/* Out-of-place: SGL input, linear buffer output supported. */
438 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
439 
/* Out-of-place: linear buffer input, SGL output supported. */
443 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
444 
/* Out-of-place: linear buffer input and linear buffer output supported. */
447 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
448 
/* PMD utilises CPU NEON instructions. */
449 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
450 
/* PMD utilises ARM CPU cryptographic extensions. */
451 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
452 
/* Device supports the rte_security protocol-offload API. */
453 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
454 
/* RSA private-key operation using the exponent key form supported. */
455 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
456 
/* RSA private-key operation using the quintuple (CRT) key form supported. */
457 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
458 
/* Digest may be placed in an encrypted region (digest-encrypted mode). */
459 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
460 
/* Session-less asymmetric operations supported. */
461 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
462 
/* Synchronous CPU crypto path (rte_cryptodev_sym_cpu_crypto_process)
 * supported. */
463 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
464 
/* Session-less symmetric operations supported. */
465 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
466 
/* Operations on data that is not byte-aligned supported. */
467 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
468 
480 extern const char *
481 rte_cryptodev_get_feature_name(uint64_t flag);
482 
485  const char *driver_name;
486  uint8_t driver_id;
487  struct rte_device *device;
489  uint64_t feature_flags;
504  struct {
505  unsigned max_nb_sessions;
510  } sym;
511 };
512 
/* Device attachment state: DETACHED devices are not usable; ATTACHED devices
 * are present and available. */
513 #define RTE_CRYPTODEV_DETACHED (0)
514 #define RTE_CRYPTODEV_ATTACHED (1)
515 
521 };
522 
525  uint32_t nb_descriptors;
530 };
531 
/* Application callback invoked when a device event of the given type occurs
 * on device dev_id; cb_arg is the opaque pointer supplied at registration. */
541 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
542  enum rte_cryptodev_event_type event, void *cb_arg);
543 
544 
547  uint64_t enqueued_count;
549  uint64_t dequeued_count;
556 };
557 
558 #define RTE_CRYPTODEV_NAME_MAX_LEN (64)
559 
/* Look up a device identifier by device name.
 * Returns the device id on success; negative on failure (per the DPDK
 * convention for lookup failures — confirm against the full header docs). */
570 extern int
571 rte_cryptodev_get_dev_id(const char *name);
572 
/* Return the name of the device with identifier dev_id, or NULL-semantics
 * on an invalid id (NOTE(review): exact failure behaviour not visible here). */
583 extern const char *
584 rte_cryptodev_name_get(uint8_t dev_id);
585 
/* Return the total number of crypto devices that have been detected. */
593 extern uint8_t
594 rte_cryptodev_count(void);
595 
/* Return the number of attached devices belonging to the given driver. */
604 extern uint8_t
605 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
606 
/* Fill devices[] (capacity nb_devices) with the ids of devices belonging to
 * driver_name; returns the number of entries written. */
618 uint8_t
619 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
620  uint8_t nb_devices);
621 /*
622  * Return the NUMA socket to which a device is connected
623  *
624  * @param dev_id
625  * The identifier of the device
626  * @return
627  * The NUMA socket id to which the device is connected or
628  * a default of zero if the socket could not be determined.
629  * -1 is returned if the dev_id value is out of range.
630  */
631 extern int
632 rte_cryptodev_socket_id(uint8_t dev_id);
633 
636  int socket_id;
637  uint16_t nb_queue_pairs;
639  uint64_t ff_disable;
646 };
647 
/* Configure a stopped device (queue pair count, socket, feature disables).
 * Must be called before rte_cryptodev_start(). Returns 0 on success. */
662 extern int
663 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
664 
/* Start a configured device; 0 on success, negative on error. */
680 extern int
681 rte_cryptodev_start(uint8_t dev_id);
682 
/* Stop a started device. The device can be restarted later. */
689 extern void
690 rte_cryptodev_stop(uint8_t dev_id);
691 
/* Close (release) a stopped device; 0 on success, negative on error. */
701 extern int
702 rte_cryptodev_close(uint8_t dev_id);
703 
/* Allocate and set up queue pair queue_pair_id of a stopped device, using
 * qp_conf for sizing and socket_id for NUMA placement of its memory. */
725 extern int
726 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
727  const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
728 
/* Experimental: query whether a queue pair has been set up on the device. */
742 __rte_experimental
743 int
744 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
745 
/* Return the number of queue pairs configured on the device. */
753 extern uint16_t
754 rte_cryptodev_queue_pair_count(uint8_t dev_id);
755 
756 
/* Retrieve enqueue/dequeue statistics into *stats; 0 on success. */
768 extern int
769 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
770 
/* Reset the device's statistics counters to zero. */
776 extern void
777 rte_cryptodev_stats_reset(uint8_t dev_id);
778 
/* Retrieve device information (driver, feature flags, capabilities) into
 * *dev_info. See the ABI note below about the versioned variants. */
793 void
794 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
795 
796 /* An extra element RTE_CRYPTO_AEAD_CHACHA20_POLY1305 is added
797  * to enum rte_crypto_aead_algorithm, also changing the value of
798  * RTE_CRYPTO_AEAD_LIST_END. To maintain ABI compatibility with applications
799  * which linked against earlier versions, preventing them, for example, from
800  * picking up the new value and using it to index into an array sized too small
801  * for it, it is necessary to have two versions of rte_cryptodev_info_get()
802  * The latest version just returns directly the capabilities retrieved from
803  * the device. The compatible version inspects the capabilities retrieved
804  * from the device, but only returns them directly if the new value
805  * is not included. If the new value is included, it allocates space
806  * for a copy of the device capabilities, trims the new value from this
807  * and returns this copy. It only needs to do this once per device.
808  * For the corner case when the alloc may fail,
809  * an empty capability list is returned, as there is no mechanism to return
810  * an error and adding such a mechanism would itself be an ABI breakage.
811  * The compatible version can be removed after the next major ABI release.
812  */
813 
814 void
815 rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
816 
817 void
818 rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
819 
/* Register cb_fn to be invoked (with cb_arg) when the given event type
 * occurs on device dev_id; 0 on success, negative on error. */
833 extern int
834 rte_cryptodev_callback_register(uint8_t dev_id,
835  enum rte_cryptodev_event_type event,
836  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
837 
/* Unregister a previously registered callback; the (event, cb_fn, cb_arg)
 * triple must match the registration. 0 on success. */
851 extern int
852 rte_cryptodev_callback_unregister(uint8_t dev_id,
853  enum rte_cryptodev_event_type event,
854  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
855 
856 
/* PMD fast-path entry point: dequeue up to nb_ops operations from qp. */
857 typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
858  struct rte_crypto_op **ops, uint16_t nb_ops);
/* PMD fast-path entry point: enqueue up to nb_ops operations onto qp. */
861 typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
862  struct rte_crypto_op **ops, uint16_t nb_ops);
/* Opaque per-callback bookkeeping structure (defined in the .c file). */
868 struct rte_cryptodev_callback;
869 
/* Tail-queue head type for the device's list of registered callbacks. */
871 TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
872 
884  uint64_t feature_flags;
889  uint8_t driver_id;
892  struct rte_cryptodev_cb_list link_intr_cbs;
898  __extension__
899  uint8_t attached : 1;
902 
903 void *
904 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
905 
914  uint8_t dev_id;
916  uint8_t socket_id;
921  __extension__
922  uint8_t dev_started : 1;
927  void **queue_pairs;
929  uint16_t nb_queue_pairs;
932  void *dev_private;
935 
936 extern struct rte_cryptodev *rte_cryptodevs;
973 static inline uint16_t
974 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
975  struct rte_crypto_op **ops, uint16_t nb_ops)
976 {
977  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
978 
979  nb_ops = (*dev->dequeue_burst)
980  (dev->data->queue_pairs[qp_id], ops, nb_ops);
981 
982  rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
983  return nb_ops;
984 }
985 
1017 static inline uint16_t
1018 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1019  struct rte_crypto_op **ops, uint16_t nb_ops)
1020 {
1021  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1022 
1023  rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1024  return (*dev->enqueue_burst)(
1025  dev->data->queue_pairs[qp_id], ops, nb_ops);
1026 }
1027 
1028 
1034  uint64_t opaque_data;
1036  uint16_t nb_drivers;
1038  uint16_t user_data_sz;
1040  __extension__ struct {
1041  void *data;
1042  uint16_t refcnt;
1043  } sess_data[0];
1045 };
1046 
1049  __extension__ void *sess_private_data[0];
1051 };
1052 
1079 __rte_experimental
1080 struct rte_mempool *
1081 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1082  uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1083  int socket_id);
1084 
1096 
1106 __rte_experimental
1109 
1122 int
1124 
1137 __rte_experimental
1138 int
1140 
/* Bind a symmetric session to device dev_id, creating the device-specific
 * private session data from xforms, allocated from mempool. 0 on success. */
1157 int
1158 rte_cryptodev_sym_session_init(uint8_t dev_id,
1159  struct rte_cryptodev_sym_session *sess,
1160  struct rte_crypto_sym_xform *xforms,
1161  struct rte_mempool *mempool);
1162 
/* Experimental: asymmetric counterpart of sym_session_init. */
1178 __rte_experimental
1179 int
1180 rte_cryptodev_asym_session_init(uint8_t dev_id,
1181  struct rte_cryptodev_asym_session *sess,
1182  struct rte_crypto_asym_xform *xforms,
1183  struct rte_mempool *mempool);
1184 
/* Release the device-specific private data of a symmetric session for
 * dev_id (inverse of sym_session_init); the session itself is not freed. */
1199 int
1200 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1201  struct rte_cryptodev_sym_session *sess);
1202 
/* Experimental: asymmetric counterpart of sym_session_clear. */
1213 __rte_experimental
1214 int
1215 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1216  struct rte_cryptodev_asym_session *sess);
1217 
1225 unsigned int
1227 
1239 __rte_experimental
1240 unsigned int
1242  struct rte_cryptodev_sym_session *sess);
1243 
1250 __rte_experimental
1251 unsigned int
1253 
1265 unsigned int
1267 
1278 __rte_experimental
1279 unsigned int
1281 
1290 int rte_cryptodev_driver_id_get(const char *name);
1291 
1300 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1301 
1314 __rte_experimental
1315 int
1317  struct rte_cryptodev_sym_session *sess,
1318  void *data,
1319  uint16_t size);
1320 
1331 __rte_experimental
1332 void *
1334  struct rte_cryptodev_sym_session *sess);
1335 
1348 __rte_experimental
1349 uint32_t
1351  struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1352  struct rte_crypto_sym_vec *vec);
1353 
1354 #ifdef __cplusplus
1355 }
1356 #endif
1357 
1358 #endif /* _RTE_CRYPTODEV_H_ */
int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string)
int rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, const char *algo_string)
struct rte_mempool * mp_session
void rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
struct rte_cryptodev_symmetric_capability sym
__rte_experimental unsigned int rte_cryptodev_sym_get_existing_header_session_size(struct rte_cryptodev_sym_session *sess)
const char * rte_cryptodev_get_feature_name(uint64_t flag)
__rte_experimental unsigned int rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
enum rte_crypto_auth_algorithm algo
void * security_ctx
int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
uint8_t rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices)
rte_crypto_asym_xform_type
enum rte_crypto_asym_xform_type xform_type
static uint16_t rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range digest_size
uint64_t feature_flags
enum rte_crypto_op_type op
int rte_cryptodev_driver_id_get(const char *name)
int rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess)
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback)
__extension__ uint8_t attached
__rte_experimental int rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, const char *xform_string)
int rte_cryptodev_sym_session_init(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool)
struct rte_mempool * session_pool
uint64_t dequeue_err_count
int rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, const char *algo_string)
uint32_t cache_size
Definition: rte_mempool.h:235
dequeue_pkt_burst_t dequeue_burst
int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
char name[RTE_CRYPTODEV_NAME_MAX_LEN]
int rte_cryptodev_callback_unregister(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
const struct rte_cryptodev_capabilities * capabilities
rte_crypto_asym_op_type
void rte_cryptodev_stop(uint8_t dev_id)
const char * driver_name
struct rte_cryptodev_symmetric_capability::@121::@124 cipher
__extension__ struct rte_cryptodev_sym_session::@132 sess_data[0]
int rte_cryptodev_close(uint8_t dev_id)
enum rte_crypto_asym_xform_type type
uint8_t driver_id
__extension__ void * sess_private_data[0]
void(* rte_cryptodev_cb_fn)(uint8_t dev_id, enum rte_cryptodev_event_type event, void *cb_arg)
__rte_experimental int rte_cryptodev_sym_session_set_user_data(struct rte_cryptodev_sym_session *sess, void *data, uint16_t size)
int rte_cryptodev_sym_capability_check_aead(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t aad_size, uint16_t iv_size)
unsigned int rte_cryptodev_sym_get_header_session_size(void)
const char * rte_cryptodev_driver_name_get(uint8_t driver_id)
int rte_cryptodev_callback_register(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
uint16_t min_mbuf_tailroom_req
struct rte_cryptodev_sym_session * rte_cryptodev_sym_session_create(struct rte_mempool *mempool)
int rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
const char * rte_cryptodev_name_get(uint8_t dev_id)
__rte_experimental void * rte_cryptodev_sym_session_get_user_data(struct rte_cryptodev_sym_session *sess)
const struct rte_cryptodev_symmetric_capability * rte_cryptodev_sym_capability_get_v20(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx)
rte_crypto_op_type
Definition: rte_crypto.h:29
__rte_experimental const struct rte_cryptodev_asymmetric_xform_capability * rte_cryptodev_asym_capability_get(uint8_t dev_id, const struct rte_cryptodev_asym_capability_idx *idx)
#define RTE_CRYPTODEV_NAME_MAX_LEN
__rte_experimental int rte_cryptodev_asym_session_clear(uint8_t dev_id, struct rte_cryptodev_asym_session *sess)
__rte_experimental int rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
enqueue_pkt_burst_t enqueue_burst
static uint16_t rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_cryptodev_symmetric_capability::@121::@123 auth
int rte_cryptodev_sym_capability_check_cipher(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size)
__rte_experimental int rte_cryptodev_asym_session_init(uint8_t dev_id, struct rte_cryptodev_asym_session *sess, struct rte_crypto_asym_xform *xforms, struct rte_mempool *mempool)
int rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
struct rte_cryptodev_data * data
struct rte_cryptodev_cb_list link_intr_cbs
struct rte_device * device
__rte_experimental struct rte_cryptodev_asym_session * rte_cryptodev_asym_session_create(struct rte_mempool *mempool)
uint32_t elt_size
Definition: rte_mempool.h:238
uint8_t rte_cryptodev_count(void)
__extension__ uint8_t dev_started
int rte_cryptodev_sym_capability_check_auth(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
struct rte_cryptodev_ops * dev_ops
uint16_t min_mbuf_headroom_req
__rte_experimental unsigned int rte_cryptodev_asym_get_header_session_size(void)
unsigned int rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
#define __rte_cache_aligned
Definition: rte_common.h:376
__rte_experimental struct rte_mempool * rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, int socket_id)
#define RTE_STD_C11
Definition: rte_common.h:40
rte_crypto_auth_algorithm
__rte_experimental int rte_cryptodev_asym_xform_capability_check_modlen(const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen)
rte_crypto_sym_xform_type
__rte_experimental uint32_t rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec)
struct rte_mempool * mp_session_private
int rte_cryptodev_start(uint8_t dev_id)
uint16_t(* enqueue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
uint64_t enqueue_err_count
uint16_t(* dequeue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range modlen
uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id)
struct rte_crypto_param_range key_size
__rte_experimental int rte_cryptodev_asym_xform_capability_check_optype(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type)
__rte_experimental int rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
struct rte_device * device
unsigned max_nb_sessions
rte_cryptodev_event_type
struct rte_crypto_param_range aad_size
enum rte_crypto_sym_xform_type xform_type
void rte_cryptodev_stats_reset(uint8_t dev_id)
int rte_cryptodev_get_dev_id(const char *name)
unsigned max_nb_queue_pairs
uint8_t rte_cryptodev_device_count_by_driver(uint8_t driver_id)
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:224
struct rte_crypto_param_range iv_size
struct rte_cryptodev * rte_cryptodevs
struct rte_cryptodev_asymmetric_capability asym
rte_crypto_aead_algorithm
rte_crypto_cipher_algorithm