DPDK  20.05.0
rte_cryptodev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include "rte_dev.h"
24 #include <rte_common.h>
25 #include <rte_config.h>
26 
27 #include "rte_cryptodev_trace_fp.h"
28 
/* NOTE(review): "cyptodev" (missing 'r') looks like a typo for "cryptodev",
 * but it matches the exported symbol name in the library; renaming the
 * declaration alone would break linkage/ABI — confirm against the .map file
 * before fixing.
 */
29 extern const char **rte_cyptodev_names;
30 
31 /* Logging Macros */
32 
/* Log an error to the CRYPTODEV log type, prefixing the caller's format
 * string with the enclosing function name and source line, and appending
 * a newline. Takes printf-style varargs. */
33 #define CDEV_LOG_ERR(...) \
34  RTE_LOG(ERR, CRYPTODEV, \
35  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
36  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
37 
/* Log an informational message to the CRYPTODEV log type with a trailing
 * newline. Unlike CDEV_LOG_ERR/CDEV_LOG_DEBUG, no function/line prefix
 * is added. Takes printf-style varargs. */
38 #define CDEV_LOG_INFO(...) \
39  RTE_LOG(INFO, CRYPTODEV, \
40  RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
41  RTE_FMT_TAIL(__VA_ARGS__,)))
42 
/* Log a debug message to the CRYPTODEV log type, prefixed with the
 * enclosing function name and source line, with a trailing newline.
 * Takes printf-style varargs. */
43 #define CDEV_LOG_DEBUG(...) \
44  RTE_LOG(DEBUG, CRYPTODEV, \
45  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
46  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
47 
/* PMD trace helper: debug-log "[<dev>] <func>: <msg>" to CRYPTODEV.
 * NOTE: the expansion references a bare identifier `dev`, so this macro
 * may only be used where a variable named `dev` (printable via %s) is in
 * scope at the call site. */
48 #define CDEV_PMD_TRACE(...) \
49  RTE_LOG(DEBUG, CRYPTODEV, \
50  RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
51  dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
52 
/* Return a pointer of type t located o bytes past the start of crypto
 * op c (virtual-address counterpart of rte_crypto_op_ctophys_offset).
 * Arguments and the whole expansion are fully parenthesized. */
66 #define rte_crypto_op_ctod_offset(c, t, o) \
67  ((t)((char *)(c) + (o)))
68 
/**
 * Return the IOVA (physical) address o bytes past the start of crypto op c.
 *
 * @param c  crypto op pointer; its phys_addr field is read
 * @param o  byte offset from the start of the op
 * @return   rte_iova_t address of the data at the given offset
 *
 * The whole replacement list is wrapped in parentheses (CERT PRE02-C) so
 * the cast cannot bind unexpectedly inside a surrounding expression —
 * this also matches the style of rte_crypto_op_ctod_offset above.
 */
#define rte_crypto_op_ctophys_offset(c, o) \
	((rte_iova_t)((c)->phys_addr + (o)))
82 
87  uint16_t min;
88  uint16_t max;
89  uint16_t increment;
95 };
96 
104  union {
105  struct {
108  uint16_t block_size;
118  } auth;
120  struct {
123  uint16_t block_size;
129  } cipher;
131  struct {
134  uint16_t block_size;
144  } aead;
145  };
146 };
147 
156  uint32_t op_types;
159  __extension__
160  union {
165  };
166 };
167 
174 };
175 
176 
183  union {
188  };
189 };
190 
193  enum rte_crypto_sym_xform_type type;
194  union {
195  enum rte_crypto_cipher_algorithm cipher;
196  enum rte_crypto_auth_algorithm auth;
197  enum rte_crypto_aead_algorithm aead;
198  } algo;
199 };
200 
209 };
210 
223  const struct rte_cryptodev_sym_capability_idx *idx);
224 
226 rte_cryptodev_sym_capability_get_v21(uint8_t dev_id,
227  const struct rte_cryptodev_sym_capability_idx *idx);
228 
230 rte_cryptodev_sym_capability_get(uint8_t dev_id,
231  const struct rte_cryptodev_sym_capability_idx *idx);
232 
243 __rte_experimental
245 rte_cryptodev_asym_capability_get(uint8_t dev_id,
246  const struct rte_cryptodev_asym_capability_idx *idx);
247 
260 int
262  const struct rte_cryptodev_symmetric_capability *capability,
263  uint16_t key_size, uint16_t iv_size);
264 
278 int
280  const struct rte_cryptodev_symmetric_capability *capability,
281  uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
282 
297 int
299  const struct rte_cryptodev_symmetric_capability *capability,
300  uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
301  uint16_t iv_size);
302 
313 __rte_experimental
314 int
316  const struct rte_cryptodev_asymmetric_xform_capability *capability,
317  enum rte_crypto_asym_op_type op_type);
318 
329 __rte_experimental
330 int
332  const struct rte_cryptodev_asymmetric_xform_capability *capability,
333  uint16_t modlen);
334 
346 int
348  const char *algo_string);
349 
361 int
363  const char *algo_string);
364 
376 int
378  const char *algo_string);
379 
391 __rte_experimental
392 int
394  const char *xform_string);
395 
396 
398 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
399  { RTE_CRYPTO_OP_TYPE_UNDEFINED }
400 
401 
410 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
411 
412 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
413 
414 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
415 
416 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
417 
418 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
419 
420 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
421 
422 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
423 
424 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
425 
428 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
429 
430 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
431 
434 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
435 
438 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
439 
443 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
444 
447 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
448 
449 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
450 
451 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
452 
453 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
454 
455 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
456 
457 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
458 
459 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
460 
461 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
462 
463 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
464 
465 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
466 
467 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
468 
480 extern const char *
481 rte_cryptodev_get_feature_name(uint64_t flag);
482 
485  const char *driver_name;
486  uint8_t driver_id;
487  struct rte_device *device;
489  uint64_t feature_flags;
504  struct {
505  unsigned max_nb_sessions;
510  } sym;
511 };
512 
513 #define RTE_CRYPTODEV_DETACHED (0)
514 #define RTE_CRYPTODEV_ATTACHED (1)
515 
521 };
522 
525  uint32_t nb_descriptors;
530 };
531 
541 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
542  enum rte_cryptodev_event_type event, void *cb_arg);
543 
544 
547  uint64_t enqueued_count;
549  uint64_t dequeued_count;
556 };
557 
558 #define RTE_CRYPTODEV_NAME_MAX_LEN (64)
559 
570 extern int
571 rte_cryptodev_get_dev_id(const char *name);
572 
583 extern const char *
584 rte_cryptodev_name_get(uint8_t dev_id);
585 
593 extern uint8_t
594 rte_cryptodev_count(void);
595 
604 extern uint8_t
605 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
606 
618 uint8_t
619 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
620  uint8_t nb_devices);
621 /*
622  * Return the NUMA socket to which a device is connected
623  *
624  * @param dev_id
625  * The identifier of the device
626  * @return
627  * The NUMA socket id to which the device is connected or
628  * a default of zero if the socket could not be determined.
629  * -1 if the dev_id value is out of range.
630  */
631 extern int
632 rte_cryptodev_socket_id(uint8_t dev_id);
633 
636  int socket_id;
637  uint16_t nb_queue_pairs;
639  uint64_t ff_disable;
646 };
647 
662 extern int
663 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
664 
680 extern int
681 rte_cryptodev_start(uint8_t dev_id);
682 
689 extern void
690 rte_cryptodev_stop(uint8_t dev_id);
691 
701 extern int
702 rte_cryptodev_close(uint8_t dev_id);
703 
725 extern int
726 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
727  const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
728 
736 extern uint16_t
737 rte_cryptodev_queue_pair_count(uint8_t dev_id);
738 
739 
751 extern int
752 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
753 
759 extern void
760 rte_cryptodev_stats_reset(uint8_t dev_id);
761 
776 void
777 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
778 
779 /* An extra element RTE_CRYPTO_AEAD_CHACHA20_POLY1305 is added
780  * to enum rte_crypto_aead_algorithm, also changing the value of
781  * RTE_CRYPTO_AEAD_LIST_END. To maintain ABI compatibility with applications
782  * which linked against earlier versions, preventing them, for example, from
783  * picking up the new value and using it to index into an array sized too small
784  * for it, it is necessary to have two versions of rte_cryptodev_info_get()
785  * The latest version just returns directly the capabilities retrieved from
786  * the device. The compatible version inspects the capabilities retrieved
787  * from the device, but only returns them directly if the new value
788  * is not included. If the new value is included, it allocates space
789  * for a copy of the device capabilities, trims the new value from this
790  * and returns this copy. It only needs to do this once per device.
791  * For the corner case when the alloc may fail,
792  * an empty capability list is returned, as there is no mechanism to return
793  * an error and adding such a mechanism would itself be an ABI breakage.
794  * The compatible version can be removed after the next major ABI release.
795  */
796 
797 void
798 rte_cryptodev_info_get_v20(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
799 
800 void
801 rte_cryptodev_info_get_v21(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
802 
816 extern int
817 rte_cryptodev_callback_register(uint8_t dev_id,
818  enum rte_cryptodev_event_type event,
819  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
820 
834 extern int
835 rte_cryptodev_callback_unregister(uint8_t dev_id,
836  enum rte_cryptodev_event_type event,
837  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
838 
839 
840 typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
841  struct rte_crypto_op **ops, uint16_t nb_ops);
844 typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
845  struct rte_crypto_op **ops, uint16_t nb_ops);
851 struct rte_cryptodev_callback;
852 
854 TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
855 
867  uint64_t feature_flags;
872  uint8_t driver_id;
875  struct rte_cryptodev_cb_list link_intr_cbs;
881  __extension__
882  uint8_t attached : 1;
885 
886 void *
887 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
888 
897  uint8_t dev_id;
899  uint8_t socket_id;
904  __extension__
905  uint8_t dev_started : 1;
910  void **queue_pairs;
912  uint16_t nb_queue_pairs;
915  void *dev_private;
918 
919 extern struct rte_cryptodev *rte_cryptodevs;
/*
 * Fast-path helper: dequeue up to nb_ops completed crypto operations from
 * queue pair qp_id of device dev_id into ops[]. Returns the number of
 * operations actually dequeued (may be fewer than requested).
 * No validation of dev_id/qp_id is performed here — the caller must pass
 * identifiers for a configured, started device.
 */
956 static inline uint16_t
957 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
958  struct rte_crypto_op **ops, uint16_t nb_ops)
959 {
960  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
961 
	/* Invoke the PMD's burst-dequeue handler on the selected queue pair. */
962  nb_ops = (*dev->dequeue_burst)
963  (dev->data->queue_pairs[qp_id], ops, nb_ops);
964 
	/* Trace after the call so the actual dequeued count is recorded. */
965  rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
966  return nb_ops;
967 }
968 
/*
 * Fast-path helper: enqueue up to nb_ops crypto operations from ops[] for
 * processing on queue pair qp_id of device dev_id. Returns the number of
 * operations actually enqueued (may be fewer than requested if the queue
 * is full). As with dequeue, dev_id/qp_id are not validated here.
 */
1000 static inline uint16_t
1001 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1002  struct rte_crypto_op **ops, uint16_t nb_ops)
1003 {
1004  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1005 
	/* Trace fires before the call, so it records the requested count,
	 * not the accepted count — unlike the dequeue-side trace. */
1006  rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1007  return (*dev->enqueue_burst)(
1008  dev->data->queue_pairs[qp_id], ops, nb_ops);
1009 }
1010 
1011 
1017  uint64_t opaque_data;
1019  uint16_t nb_drivers;
1021  uint16_t user_data_sz;
1023  __extension__ struct {
1024  void *data;
1025  uint16_t refcnt;
1026  } sess_data[0];
1028 };
1029 
1032  __extension__ void *sess_private_data[0];
1034 };
1035 
1062 __rte_experimental
1063 struct rte_mempool *
1064 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1065  uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1066  int socket_id);
1067 
1079 
1089 __rte_experimental
1092 
1105 int
1107 
1120 __rte_experimental
1121 int
1123 
1140 int
1141 rte_cryptodev_sym_session_init(uint8_t dev_id,
1142  struct rte_cryptodev_sym_session *sess,
1143  struct rte_crypto_sym_xform *xforms,
1144  struct rte_mempool *mempool);
1145 
1161 __rte_experimental
1162 int
1163 rte_cryptodev_asym_session_init(uint8_t dev_id,
1164  struct rte_cryptodev_asym_session *sess,
1165  struct rte_crypto_asym_xform *xforms,
1166  struct rte_mempool *mempool);
1167 
1182 int
1183 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1184  struct rte_cryptodev_sym_session *sess);
1185 
1196 __rte_experimental
1197 int
1198 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1199  struct rte_cryptodev_asym_session *sess);
1200 
1208 unsigned int
1210 
1222 __rte_experimental
1223 unsigned int
1225  struct rte_cryptodev_sym_session *sess);
1226 
1233 __rte_experimental
1234 unsigned int
1236 
1248 unsigned int
1250 
1261 __rte_experimental
1262 unsigned int
1264 
1273 int rte_cryptodev_driver_id_get(const char *name);
1274 
1283 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1284 
1297 __rte_experimental
1298 int
1300  struct rte_cryptodev_sym_session *sess,
1301  void *data,
1302  uint16_t size);
1303 
1314 __rte_experimental
1315 void *
1317  struct rte_cryptodev_sym_session *sess);
1318 
1331 __rte_experimental
1332 uint32_t
1334  struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1335  struct rte_crypto_sym_vec *vec);
1336 
1337 #ifdef __cplusplus
1338 }
1339 #endif
1340 
1341 #endif /* _RTE_CRYPTODEV_H_ */
int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string)
int rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, const char *algo_string)
struct rte_mempool * mp_session
void rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
struct rte_cryptodev_symmetric_capability sym
__rte_experimental unsigned int rte_cryptodev_sym_get_existing_header_session_size(struct rte_cryptodev_sym_session *sess)
const char * rte_cryptodev_get_feature_name(uint64_t flag)
__rte_experimental unsigned int rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
enum rte_crypto_auth_algorithm algo
void * security_ctx
int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
uint8_t rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices)
rte_crypto_asym_xform_type
enum rte_crypto_asym_xform_type xform_type
static uint16_t rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range digest_size
uint64_t feature_flags
enum rte_crypto_op_type op
int rte_cryptodev_driver_id_get(const char *name)
int rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess)
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback)
__extension__ uint8_t attached
__rte_experimental int rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, const char *xform_string)
int rte_cryptodev_sym_session_init(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool)
struct rte_mempool * session_pool
uint64_t dequeue_err_count
int rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, const char *algo_string)
uint32_t cache_size
Definition: rte_mempool.h:235
dequeue_pkt_burst_t dequeue_burst
int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
char name[RTE_CRYPTODEV_NAME_MAX_LEN]
int rte_cryptodev_callback_unregister(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
const struct rte_cryptodev_capabilities * capabilities
rte_crypto_asym_op_type
void rte_cryptodev_stop(uint8_t dev_id)
const char * driver_name
struct rte_cryptodev_symmetric_capability::@121::@124 cipher
__extension__ struct rte_cryptodev_sym_session::@132 sess_data[0]
int rte_cryptodev_close(uint8_t dev_id)
enum rte_crypto_asym_xform_type type
uint8_t driver_id
__extension__ void * sess_private_data[0]
void(* rte_cryptodev_cb_fn)(uint8_t dev_id, enum rte_cryptodev_event_type event, void *cb_arg)
__rte_experimental int rte_cryptodev_sym_session_set_user_data(struct rte_cryptodev_sym_session *sess, void *data, uint16_t size)
int rte_cryptodev_sym_capability_check_aead(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t aad_size, uint16_t iv_size)
unsigned int rte_cryptodev_sym_get_header_session_size(void)
const char * rte_cryptodev_driver_name_get(uint8_t driver_id)
int rte_cryptodev_callback_register(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
uint16_t min_mbuf_tailroom_req
struct rte_cryptodev_sym_session * rte_cryptodev_sym_session_create(struct rte_mempool *mempool)
int rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
const char * rte_cryptodev_name_get(uint8_t dev_id)
__rte_experimental void * rte_cryptodev_sym_session_get_user_data(struct rte_cryptodev_sym_session *sess)
const struct rte_cryptodev_symmetric_capability * rte_cryptodev_sym_capability_get_v20(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx)
rte_crypto_op_type
Definition: rte_crypto.h:29
__rte_experimental const struct rte_cryptodev_asymmetric_xform_capability * rte_cryptodev_asym_capability_get(uint8_t dev_id, const struct rte_cryptodev_asym_capability_idx *idx)
#define RTE_CRYPTODEV_NAME_MAX_LEN
__rte_experimental int rte_cryptodev_asym_session_clear(uint8_t dev_id, struct rte_cryptodev_asym_session *sess)
__rte_experimental int rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
enqueue_pkt_burst_t enqueue_burst
static uint16_t rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_cryptodev_symmetric_capability::@121::@123 auth
int rte_cryptodev_sym_capability_check_cipher(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size)
__rte_experimental int rte_cryptodev_asym_session_init(uint8_t dev_id, struct rte_cryptodev_asym_session *sess, struct rte_crypto_asym_xform *xforms, struct rte_mempool *mempool)
int rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
struct rte_cryptodev_data * data
struct rte_cryptodev_cb_list link_intr_cbs
struct rte_device * device
__rte_experimental struct rte_cryptodev_asym_session * rte_cryptodev_asym_session_create(struct rte_mempool *mempool)
uint32_t elt_size
Definition: rte_mempool.h:238
uint8_t rte_cryptodev_count(void)
__extension__ uint8_t dev_started
int rte_cryptodev_sym_capability_check_auth(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
struct rte_cryptodev_ops * dev_ops
uint16_t min_mbuf_headroom_req
__rte_experimental unsigned int rte_cryptodev_asym_get_header_session_size(void)
unsigned int rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
__rte_experimental struct rte_mempool * rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, int socket_id)
#define RTE_STD_C11
Definition: rte_common.h:40
rte_crypto_auth_algorithm
__rte_experimental int rte_cryptodev_asym_xform_capability_check_modlen(const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen)
rte_crypto_sym_xform_type
__rte_experimental uint32_t rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec)
struct rte_mempool * mp_session_private
int rte_cryptodev_start(uint8_t dev_id)
uint16_t(* enqueue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
uint64_t enqueue_err_count
#define __rte_cache_aligned
Definition: rte_common.h:367
uint16_t(* dequeue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range modlen
uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id)
struct rte_crypto_param_range key_size
__rte_experimental int rte_cryptodev_asym_xform_capability_check_optype(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type)
struct rte_device * device
unsigned max_nb_sessions
rte_cryptodev_event_type
struct rte_crypto_param_range aad_size
enum rte_crypto_sym_xform_type xform_type
void rte_cryptodev_stats_reset(uint8_t dev_id)
int rte_cryptodev_get_dev_id(const char *name)
unsigned max_nb_queue_pairs
uint8_t rte_cryptodev_device_count_by_driver(uint8_t driver_id)
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:224
struct rte_crypto_param_range iv_size
struct rte_cryptodev * rte_cryptodevs
struct rte_cryptodev_asymmetric_capability asym
rte_crypto_aead_algorithm
rte_crypto_cipher_algorithm