DPDK  21.02.0
rte_cryptodev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
21 #include "rte_kvargs.h"
22 #include "rte_crypto.h"
23 #include "rte_dev.h"
24 #include <rte_common.h>
25 #include <rte_config.h>
26 #include <rte_rcu_qsbr.h>
27 
28 #include "rte_cryptodev_trace_fp.h"
29 
/* NOTE(review): "cyptodev" (missing 'r') looks like a typo, but this is an
 * exported symbol — renaming it would break the library ABI; confirm against
 * the version map before touching the name.
 */
30 extern const char **rte_cyptodev_names;
31 
32 /* Logging Macros */
33 
/* Log an error on the CRYPTODEV logtype; the calling function name and
 * source line are prepended to the user-supplied format string.
 */
34 #define CDEV_LOG_ERR(...) \
35  RTE_LOG(ERR, CRYPTODEV, \
36  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
37  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
38 
/* Log an informational message; no function/line prefix is added. */
39 #define CDEV_LOG_INFO(...) \
40  RTE_LOG(INFO, CRYPTODEV, \
41  RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
42  RTE_FMT_TAIL(__VA_ARGS__,)))
43 
/* Log a debug message with function name and line number prepended. */
44 #define CDEV_LOG_DEBUG(...) \
45  RTE_LOG(DEBUG, CRYPTODEV, \
46  RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
47  __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
48 
/* PMD trace log. NOTE: the expansion references an unqualified variable
 * named 'dev', which therefore must be in scope at every expansion site.
 */
49 #define CDEV_PMD_TRACE(...) \
50  RTE_LOG(DEBUG, CRYPTODEV, \
51  RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
52  dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
53 
/*
 * Return a pointer of type @t to the data located @o bytes past the start
 * of the crypto operation @c. All arguments are parenthesized so the macro
 * is safe with arbitrary expression arguments.
 */
#define rte_crypto_op_ctod_offset(c, t, o) \
	((t)((char *)(c) + (o)))

/*
 * Return the IOVA (bus/physical) address of the data located @o bytes past
 * the start of the crypto operation @c.
 * Fix: the whole expansion is now wrapped in parentheses; the previous form
 * ended with a bare cast expression, so an adjacent higher-precedence
 * operator at the use site could bind unexpectedly.
 */
#define rte_crypto_op_ctophys_offset(c, o) \
	((rte_iova_t)((c)->phys_addr + (o)))
83 
88  uint16_t min;
89  uint16_t max;
90  uint16_t increment;
96 };
97 
105  union {
106  struct {
109  uint16_t block_size;
119  } auth;
121  struct {
124  uint16_t block_size;
130  } cipher;
132  struct {
135  uint16_t block_size;
145  } aead;
146  };
147 };
148 
157  uint32_t op_types;
160  __extension__
161  union {
166  };
167 };
168 
175 };
176 
177 
184  union {
189  };
190 };
191 
194  enum rte_crypto_sym_xform_type type;
195  union {
196  enum rte_crypto_cipher_algorithm cipher;
197  enum rte_crypto_auth_algorithm auth;
198  enum rte_crypto_aead_algorithm aead;
199  } algo;
200 };
201 
210 };
211 
223 rte_cryptodev_sym_capability_get(uint8_t dev_id,
224  const struct rte_cryptodev_sym_capability_idx *idx);
225 
236 __rte_experimental
238 rte_cryptodev_asym_capability_get(uint8_t dev_id,
239  const struct rte_cryptodev_asym_capability_idx *idx);
240 
253 int
255  const struct rte_cryptodev_symmetric_capability *capability,
256  uint16_t key_size, uint16_t iv_size);
257 
271 int
273  const struct rte_cryptodev_symmetric_capability *capability,
274  uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
275 
290 int
292  const struct rte_cryptodev_symmetric_capability *capability,
293  uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
294  uint16_t iv_size);
295 
306 __rte_experimental
307 int
309  const struct rte_cryptodev_asymmetric_xform_capability *capability,
310  enum rte_crypto_asym_op_type op_type);
311 
322 __rte_experimental
323 int
325  const struct rte_cryptodev_asymmetric_xform_capability *capability,
326  uint16_t modlen);
327 
339 int
341  const char *algo_string);
342 
354 int
356  const char *algo_string);
357 
369 int
371  const char *algo_string);
372 
384 __rte_experimental
385 int
387  const char *xform_string);
388 
389 
/* Terminator entry for a driver's capability array: an element whose op
 * type is RTE_CRYPTO_OP_TYPE_UNDEFINED marks the end of the list.
 */
391 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
392  { RTE_CRYPTO_OP_TYPE_UNDEFINED }
393 
394 
/*
 * Crypto device feature flags (one bit each in a uint64_t feature_flags
 * field). Abbreviations follow DPDK convention — SGL: scatter-gather list,
 * LB: linear (contiguous) buffer, OOP: out-of-place (separate input and
 * output buffers) — TODO confirm against the full upstream header, whose
 * per-flag doc comments were elided from this view.
 */
403 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
404 
405 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
406 
407 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
408 
/* CPU_* flags advertise the instruction-set extensions a software PMD
 * was built to use.
 */
409 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
410 
411 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
412 
413 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
414 
415 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
416 
417 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
418 
421 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
422 
/* Buffer-layout support: in-place SGL, plus the four in/out combinations
 * of scatter-gather and linear buffers for out-of-place operations.
 */
423 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
424 
427 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
428 
431 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
432 
436 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
437 
440 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
441 
442 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
443 
444 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
445 
446 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
447 
448 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
449 
450 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
451 
452 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
453 
454 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
455 
456 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
457 
458 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
459 
460 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
461 
/* Device supports the raw data-path API (rte_cryptodev_raw_*). */
462 #define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL << 24)
463 
474 extern const char *
475 rte_cryptodev_get_feature_name(uint64_t flag);
476 
479  const char *driver_name;
480  uint8_t driver_id;
481  struct rte_device *device;
483  uint64_t feature_flags;
498  struct {
499  unsigned max_nb_sessions;
504  } sym;
505 };
506 
/* Device attachment states (see the 'attached' bitfield in
 * struct rte_cryptodev).
 */
507 #define RTE_CRYPTODEV_DETACHED (0)
508 #define RTE_CRYPTODEV_ATTACHED (1)
509 
515 };
516 
519  uint32_t nb_descriptors;
524 };
525 
/* User-supplied enqueue/dequeue burst callback. Invoked with the burst of
 * ops for (dev_id, qp_id) and the user_param registered with the callback;
 * its return value replaces nb_ops for the remainder of the burst (see the
 * callback loops in rte_cryptodev_enqueue_burst()/_dequeue_burst()).
 */
547 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
548  struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
549 
/* Device-event notification callback, registered via
 * rte_cryptodev_callback_register(); receives the event type and cb_arg.
 */
559 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
560  enum rte_cryptodev_event_type event, void *cb_arg);
561 
562 
565  uint64_t enqueued_count;
567  uint64_t dequeued_count;
574 };
575 
/* Maximum length of a crypto device name, including the terminator. */
576 #define RTE_CRYPTODEV_NAME_MAX_LEN (64)
577 
588 extern int
589 rte_cryptodev_get_dev_id(const char *name);
590 
601 extern const char *
602 rte_cryptodev_name_get(uint8_t dev_id);
603 
611 extern uint8_t
612 rte_cryptodev_count(void);
613 
622 extern uint8_t
623 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
624 
636 uint8_t
637 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
638  uint8_t nb_devices);
639 /*
640  * Return the NUMA socket to which a device is connected
641  *
642  * @param dev_id
643  * The identifier of the device
644  * @return
645  * The NUMA socket id to which the device is connected or
646  * a default of zero if the socket could not be determined.
647  * -1 if the dev_id value is out of range.
648  */
649 extern int
650 rte_cryptodev_socket_id(uint8_t dev_id);
651 
654  int socket_id;
655  uint16_t nb_queue_pairs;
657  uint64_t ff_disable;
664 };
665 
680 extern int
681 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
682 
698 extern int
699 rte_cryptodev_start(uint8_t dev_id);
700 
707 extern void
708 rte_cryptodev_stop(uint8_t dev_id);
709 
719 extern int
720 rte_cryptodev_close(uint8_t dev_id);
721 
743 extern int
744 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
745  const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
746 
760 __rte_experimental
761 int
762 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
763 
771 extern uint16_t
772 rte_cryptodev_queue_pair_count(uint8_t dev_id);
773 
774 
786 extern int
787 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
788 
794 extern void
795 rte_cryptodev_stats_reset(uint8_t dev_id);
796 
810 extern void
811 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
812 
813 
827 extern int
828 rte_cryptodev_callback_register(uint8_t dev_id,
829  enum rte_cryptodev_event_type event,
830  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
831 
845 extern int
846 rte_cryptodev_callback_unregister(uint8_t dev_id,
847  enum rte_cryptodev_event_type event,
848  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
849 
/* PMD fast-path hook: dequeue up to nb_ops processed ops from queue pair
 * qp into ops[]; returns the number actually dequeued.
 */
850 typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
851  struct rte_crypto_op **ops, uint16_t nb_ops);
/* PMD fast-path hook: enqueue up to nb_ops ops from ops[] onto queue pair
 * qp; returns the number actually accepted.
 */
854 typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
855  struct rte_crypto_op **ops, uint16_t nb_ops);
/* Opaque forward declaration; the definition is private to the library. */
861 struct rte_cryptodev_callback;
862 
/* Declares struct rte_cryptodev_cb_list: a TAILQ of registered
 * rte_cryptodev_callback entries.
 */
864 TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
865 
875  void *arg;
877 };
878 
/* Head of a per-queue-pair callback list whose entries are reclaimed via
 * RCU QSBR: readers (the inline burst functions) walk 'next' inside a
 * thread_online/thread_offline section on 'qsbr'.
 */
883 struct rte_cryptodev_cb_rcu {
884  struct rte_cryptodev_cb *next;
886  struct rte_rcu_qsbr *qsbr;
888 };
889 
901  uint64_t feature_flags;
906  uint8_t driver_id;
909  struct rte_cryptodev_cb_list link_intr_cbs;
915  __extension__
916  uint8_t attached : 1;
919  struct rte_cryptodev_cb_rcu *enq_cbs;
922  struct rte_cryptodev_cb_rcu *deq_cbs;
925 
926 void *
927 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
928 
937  uint8_t dev_id;
939  uint8_t socket_id;
944  __extension__
945  uint8_t dev_started : 1;
950  void **queue_pairs;
952  uint16_t nb_queue_pairs;
955  void *dev_private;
958 
959 extern struct rte_cryptodev *rte_cryptodevs;
996 static inline uint16_t
997 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
998  struct rte_crypto_op **ops, uint16_t nb_ops)
999 {
1000  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1001 
1002  rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1003  nb_ops = (*dev->dequeue_burst)
1004  (dev->data->queue_pairs[qp_id], ops, nb_ops);
1005 #ifdef RTE_CRYPTO_CALLBACKS
1006  if (unlikely(dev->deq_cbs != NULL)) {
1007  struct rte_cryptodev_cb_rcu *list;
1008  struct rte_cryptodev_cb *cb;
1009 
1010  /* __ATOMIC_RELEASE memory order was used when the
1011  * call back was inserted into the list.
1012  * Since there is a clear dependency between loading
1013  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1014  * not required.
1015  */
1016  list = &dev->deq_cbs[qp_id];
1017  rte_rcu_qsbr_thread_online(list->qsbr, 0);
1018  cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1019 
1020  while (cb != NULL) {
1021  nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1022  cb->arg);
1023  cb = cb->next;
1024  };
1025 
1026  rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1027  }
1028 #endif
1029  return nb_ops;
1030 }
1031 
1063 static inline uint16_t
1064 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
1065  struct rte_crypto_op **ops, uint16_t nb_ops)
1066 {
1067  struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
1068 
1069 #ifdef RTE_CRYPTO_CALLBACKS
1070  if (unlikely(dev->enq_cbs != NULL)) {
1071  struct rte_cryptodev_cb_rcu *list;
1072  struct rte_cryptodev_cb *cb;
1073 
1074  /* __ATOMIC_RELEASE memory order was used when the
1075  * call back was inserted into the list.
1076  * Since there is a clear dependency between loading
1077  * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
1078  * not required.
1079  */
1080  list = &dev->enq_cbs[qp_id];
1081  rte_rcu_qsbr_thread_online(list->qsbr, 0);
1082  cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
1083 
1084  while (cb != NULL) {
1085  nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
1086  cb->arg);
1087  cb = cb->next;
1088  };
1089 
1090  rte_rcu_qsbr_thread_offline(list->qsbr, 0);
1091  }
1092 #endif
1093 
1094  rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1095  return (*dev->enqueue_burst)(
1096  dev->data->queue_pairs[qp_id], ops, nb_ops);
1097 }
1098 
1099 
1105  uint64_t opaque_data;
1107  uint16_t nb_drivers;
1109  uint16_t user_data_sz;
1111  __extension__ struct {
1112  void *data;
1113  uint16_t refcnt;
1114  } sess_data[0];
1116 };
1117 
1120  __extension__ void *sess_private_data[0];
1122 };
1123 
1150 __rte_experimental
1151 struct rte_mempool *
1152 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1153  uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1154  int socket_id);
1155 
1167 
1177 __rte_experimental
1180 
1193 int
1195 
1208 __rte_experimental
1209 int
1211 
1228 int
1229 rte_cryptodev_sym_session_init(uint8_t dev_id,
1230  struct rte_cryptodev_sym_session *sess,
1231  struct rte_crypto_sym_xform *xforms,
1232  struct rte_mempool *mempool);
1233 
1249 __rte_experimental
1250 int
1251 rte_cryptodev_asym_session_init(uint8_t dev_id,
1252  struct rte_cryptodev_asym_session *sess,
1253  struct rte_crypto_asym_xform *xforms,
1254  struct rte_mempool *mempool);
1255 
1270 int
1271 rte_cryptodev_sym_session_clear(uint8_t dev_id,
1272  struct rte_cryptodev_sym_session *sess);
1273 
1284 __rte_experimental
1285 int
1286 rte_cryptodev_asym_session_clear(uint8_t dev_id,
1287  struct rte_cryptodev_asym_session *sess);
1288 
1296 unsigned int
1298 
1310 __rte_experimental
1311 unsigned int
1313  struct rte_cryptodev_sym_session *sess);
1314 
1321 __rte_experimental
1322 unsigned int
1324 
1336 unsigned int
1338 
1349 __rte_experimental
1350 unsigned int
1352 
1361 int rte_cryptodev_driver_id_get(const char *name);
1362 
1371 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1372 
1385 __rte_experimental
1386 int
1388  struct rte_cryptodev_sym_session *sess,
1389  void *data,
1390  uint16_t size);
1391 
1402 __rte_experimental
1403 void *
1405  struct rte_cryptodev_sym_session *sess);
1406 
1419 __rte_experimental
1420 uint32_t
1422  struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1423  struct rte_crypto_sym_vec *vec);
1424 
1434 __rte_experimental
1435 int
1436 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1437 
1443  struct rte_cryptodev_sym_session *crypto_sess;
1444  struct rte_crypto_sym_xform *xform;
1445  struct rte_security_session *sec_sess;
1446 };
1447 
1474  void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1475  union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1476 
1499  void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1500  uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1501  struct rte_crypto_va_iova_ptr *iv,
1502  struct rte_crypto_va_iova_ptr *digest,
1503  struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1504  void *user_data);
1505 
/* Raw data-path hook: tell the PMD that @n previously enqueued or dequeued
 * descriptors on @qp are done/consumed; @drv_ctx is driver-private state.
 */
1517 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1518  uint32_t n);
1519 
/* User hook: given the user_data of the first op, return how many ops the
 * PMD should try to dequeue in this burst.
 */
1529 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1530 
/* User hook: invoked once per dequeued op with its user_data, its index in
 * the burst, and whether the op succeeded.
 */
1539 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1540  uint32_t index, uint8_t is_op_success);
1541 
/* Raw data-path hook: dequeue a burst from @qp, reporting per-op results
 * through @post_dequeue and collecting user_data into @out_user_data
 * (one slot per op when @is_user_data_array is set); returns the burst
 * size, with @n_success / @dequeue_status as out-parameters.
 */
1580 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1581  uint8_t *drv_ctx,
1582  rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1583  rte_cryptodev_raw_post_dequeue_t post_dequeue,
1584  void **out_user_data, uint8_t is_user_data_array,
1585  uint32_t *n_success, int *dequeue_status);
1586 
/* Raw data-path hook: dequeue a single op from @qp, returning its
 * user_data pointer (NULL when nothing was dequeued — TODO confirm, the
 * doc comment for this typedef is elided here).
 */
1610 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1611  void *qp, uint8_t *drv_ctx, int *dequeue_status,
1612  enum rte_crypto_op_status *op_status);
1613 
1620  void *qp_data;
1621 
1623  cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1626  cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1628 
1629  /* Driver specific context data */
1630  __extension__ uint8_t drv_ctx_data[];
1631 };
1632 
1656 __rte_experimental
1657 int
1658 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1659  struct rte_crypto_raw_dp_ctx *ctx,
1660  enum rte_crypto_op_sess_type sess_type,
1661  union rte_cryptodev_session_ctx session_ctx,
1662  uint8_t is_update);
1663 
1688 __rte_experimental
1689 uint32_t
1691  struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1692  void **user_data, int *enqueue_status);
1693 
1714 __rte_experimental
1715 static __rte_always_inline int
1717  struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1718  union rte_crypto_sym_ofs ofs,
1719  struct rte_crypto_va_iova_ptr *iv,
1720  struct rte_crypto_va_iova_ptr *digest,
1721  struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1722  void *user_data)
1723 {
1724  return (*ctx->enqueue)(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1725  n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1726 }
1727 
1738 __rte_experimental
1739 int
1741  uint32_t n);
1742 
1781 __rte_experimental
1782 uint32_t
1784  rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1785  rte_cryptodev_raw_post_dequeue_t post_dequeue,
1786  void **out_user_data, uint8_t is_user_data_array,
1787  uint32_t *n_success, int *dequeue_status);
1788 
1812 __rte_experimental
1813 static __rte_always_inline void *
1815  int *dequeue_status, enum rte_crypto_op_status *op_status)
1816 {
1817  return (*ctx->dequeue)(ctx->qp_data, ctx->drv_ctx_data, dequeue_status,
1818  op_status);
1819 }
1820 
1830 __rte_experimental
1831 int
1833  uint32_t n);
1834 
1871 __rte_experimental
1872 struct rte_cryptodev_cb *
1873 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1874  uint16_t qp_id,
1876  void *cb_arg);
1877 
1900 __rte_experimental
1901 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1902  uint16_t qp_id,
1903  struct rte_cryptodev_cb *cb);
1904 
1940 __rte_experimental
1941 struct rte_cryptodev_cb *
1942 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1943  uint16_t qp_id,
1945  void *cb_arg);
1946 
1968 __rte_experimental
1969 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1970  uint16_t qp_id,
1971  struct rte_cryptodev_cb *cb);
1972 
1973 #ifdef __cplusplus
1974 }
1975 #endif
1976 
1977 #endif /* _RTE_CRYPTODEV_H_ */
int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string)
int rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, const char *algo_string)
#define __rte_always_inline
Definition: rte_common.h:226
struct rte_cryptodev_cb * next
struct rte_mempool * mp_session
void rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
struct rte_cryptodev_symmetric_capability sym
__rte_experimental unsigned int rte_cryptodev_sym_get_existing_header_session_size(struct rte_cryptodev_sym_session *sess)
static __rte_experimental __rte_always_inline int rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_vec *data_vec, uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
const char * rte_cryptodev_get_feature_name(uint64_t flag)
__rte_experimental unsigned int rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
enum rte_crypto_auth_algorithm algo
void * security_ctx
int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
uint8_t rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices)
rte_crypto_asym_xform_type
enum rte_crypto_asym_xform_type xform_type
static uint16_t rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range digest_size
uint64_t feature_flags
enum rte_crypto_op_type op
int rte_cryptodev_driver_id_get(const char *name)
int rte_cryptodev_sym_session_clear(uint8_t dev_id, struct rte_cryptodev_sym_session *sess)
TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback)
__rte_experimental uint32_t rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data, uint8_t is_user_data_array, uint32_t *n_success, int *dequeue_status)
__extension__ uint8_t attached
__rte_experimental int rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, const char *xform_string)
int rte_cryptodev_sym_session_init(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool)
__rte_experimental uint32_t rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, void **user_data, int *enqueue_status)
struct rte_mempool * session_pool
uint64_t dequeue_err_count
struct rte_cryptodev_symmetric_capability::@135::@137 auth
int rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, const char *algo_string)
uint32_t cache_size
Definition: rte_mempool.h:224
static __rte_always_inline void rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
Definition: rte_rcu_qsbr.h:356
const struct rte_cryptodev_symmetric_capability * rte_cryptodev_sym_capability_get(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx)
dequeue_pkt_burst_t dequeue_burst
int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
char name[RTE_CRYPTODEV_NAME_MAX_LEN]
int rte_cryptodev_callback_unregister(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
const struct rte_cryptodev_capabilities * capabilities
uint32_t(* rte_cryptodev_raw_get_dequeue_count_t)(void *user_data)
rte_crypto_asym_op_type
__rte_experimental struct rte_cryptodev_cb * rte_cryptodev_add_enq_callback(uint8_t dev_id, uint16_t qp_id, rte_cryptodev_callback_fn cb_fn, void *cb_arg)
struct rte_cryptodev_cb_rcu * deq_cbs
void rte_cryptodev_stop(uint8_t dev_id)
const char * driver_name
int rte_cryptodev_close(uint8_t dev_id)
enum rte_crypto_asym_xform_type type
uint8_t driver_id
int(* cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx, uint32_t n)
__extension__ void * sess_private_data[0]
void(* rte_cryptodev_cb_fn)(uint8_t dev_id, enum rte_cryptodev_event_type event, void *cb_arg)
__rte_experimental int rte_cryptodev_sym_session_set_user_data(struct rte_cryptodev_sym_session *sess, void *data, uint16_t size)
int rte_cryptodev_sym_capability_check_aead(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t aad_size, uint16_t iv_size)
#define unlikely(x)
struct rte_cryptodev_cb_rcu * enq_cbs
unsigned int rte_cryptodev_sym_get_header_session_size(void)
const char * rte_cryptodev_driver_name_get(uint8_t driver_id)
int rte_cryptodev_callback_register(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
uint16_t min_mbuf_tailroom_req
__rte_experimental int rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, uint32_t n)
struct rte_cryptodev_symmetric_capability::@135::@138 cipher
__rte_experimental int rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
struct rte_cryptodev_sym_session * rte_cryptodev_sym_session_create(struct rte_mempool *mempool)
int rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
const char * rte_cryptodev_name_get(uint8_t dev_id)
__rte_experimental void * rte_cryptodev_sym_session_get_user_data(struct rte_cryptodev_sym_session *sess)
rte_crypto_op_type
Definition: rte_crypto.h:29
__rte_experimental const struct rte_cryptodev_asymmetric_xform_capability * rte_cryptodev_asym_capability_get(uint8_t dev_id, const struct rte_cryptodev_asym_capability_idx *idx)
#define RTE_CRYPTODEV_NAME_MAX_LEN
__rte_experimental int rte_cryptodev_asym_session_clear(uint8_t dev_id, struct rte_cryptodev_asym_session *sess)
__rte_experimental int rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
rte_cryptodev_callback_fn fn
enqueue_pkt_burst_t enqueue_burst
static uint16_t rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
int rte_cryptodev_sym_capability_check_cipher(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size)
__rte_experimental int rte_cryptodev_asym_session_init(uint8_t dev_id, struct rte_cryptodev_asym_session *sess, struct rte_crypto_asym_xform *xforms, struct rte_mempool *mempool)
uint16_t(* rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param)
int rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
__rte_experimental int rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx, uint32_t n)
struct rte_cryptodev_data * data
struct rte_cryptodev_cb_list link_intr_cbs
struct rte_device * device
__rte_experimental struct rte_cryptodev_asym_session * rte_cryptodev_asym_session_create(struct rte_mempool *mempool)
uint32_t elt_size
Definition: rte_mempool.h:227
__rte_experimental int rte_cryptodev_remove_enq_callback(uint8_t dev_id, uint16_t qp_id, struct rte_cryptodev_cb *cb)
uint8_t rte_cryptodev_count(void)
__extension__ uint8_t dev_started
void *(* cryptodev_sym_raw_dequeue_t)(void *qp, uint8_t *drv_ctx, int *dequeue_status, enum rte_crypto_op_status *op_status)
int rte_cryptodev_sym_capability_check_auth(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
struct rte_cryptodev_ops * dev_ops
uint16_t min_mbuf_headroom_req
__rte_experimental unsigned int rte_cryptodev_asym_get_header_session_size(void)
unsigned int rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
#define __rte_cache_aligned
Definition: rte_common.h:400
__rte_experimental struct rte_mempool * rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, int socket_id)
static __rte_always_inline void rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
Definition: rte_rcu_qsbr.h:303
uint32_t(* cryptodev_sym_raw_enqueue_burst_t)(void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status)
#define RTE_STD_C11
Definition: rte_common.h:40
rte_crypto_auth_algorithm
__rte_experimental int rte_cryptodev_asym_xform_capability_check_modlen(const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen)
rte_crypto_sym_xform_type
__rte_experimental uint32_t rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec)
struct rte_mempool * mp_session_private
int rte_cryptodev_start(uint8_t dev_id)
__rte_experimental int rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_raw_dp_ctx *ctx, enum rte_crypto_op_sess_type sess_type, union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
uint16_t(* enqueue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
uint64_t enqueue_err_count
uint16_t(* dequeue_pkt_burst_t)(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rte_crypto_op_sess_type
Definition: rte_crypto.h:62
struct rte_crypto_param_range modlen
uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id)
struct rte_crypto_param_range key_size
__rte_experimental int rte_cryptodev_asym_xform_capability_check_optype(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type)
int(* cryptodev_sym_raw_enqueue_t)(void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
__rte_experimental int rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
struct rte_device * device
unsigned max_nb_sessions
rte_cryptodev_event_type
uint32_t(* cryptodev_sym_raw_dequeue_burst_t)(void *qp, uint8_t *drv_ctx, rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data, uint8_t is_user_data_array, uint32_t *n_success, int *dequeue_status)
struct rte_crypto_param_range aad_size
__rte_experimental int rte_cryptodev_remove_deq_callback(uint8_t dev_id, uint16_t qp_id, struct rte_cryptodev_cb *cb)
enum rte_crypto_sym_xform_type xform_type
static __rte_experimental __rte_always_inline void * rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx, int *dequeue_status, enum rte_crypto_op_status *op_status)
void rte_cryptodev_stats_reset(uint8_t dev_id)
int rte_cryptodev_get_dev_id(const char *name)
unsigned max_nb_queue_pairs
uint8_t rte_cryptodev_device_count_by_driver(uint8_t driver_id)
char name[RTE_MEMZONE_NAMESIZE]
Definition: rte_mempool.h:213
__rte_experimental struct rte_cryptodev_cb * rte_cryptodev_add_deq_callback(uint8_t dev_id, uint16_t qp_id, rte_cryptodev_callback_fn cb_fn, void *cb_arg)
__extension__ struct rte_cryptodev_sym_session::@146 sess_data[0]
void(* rte_cryptodev_raw_post_dequeue_t)(void *user_data, uint32_t index, uint8_t is_op_success)
struct rte_crypto_param_range iv_size
struct rte_cryptodev * rte_cryptodevs
rte_crypto_op_status
Definition: rte_crypto.h:39
struct rte_cryptodev_asymmetric_capability asym
rte_crypto_aead_algorithm
rte_crypto_cipher_algorithm