DPDK 25.11.0-rc1
rte_cryptodev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Intel Corporation.
 */

#ifndef _RTE_CRYPTODEV_H_
#define _RTE_CRYPTODEV_H_

#include <rte_compat.h>
#include "rte_kvargs.h"
#include "rte_crypto.h"
#include <rte_common.h>
#include <rte_rcu_qsbr.h>

#include "rte_cryptodev_trace_fp.h"

#ifdef __cplusplus
extern "C" {
#endif

extern int rte_cryptodev_logtype;
#define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype

/* Logging Macros */
#define CDEV_LOG_ERR(...) \
	RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

#define CDEV_LOG_INFO(...) \
	RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)

#define CDEV_LOG_DEBUG(...) \
	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
		"%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)

#define CDEV_PMD_TRACE(...) \
	RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
		"[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)

#define rte_crypto_op_ctod_offset(c, t, o) \
	((t)((char *)(c) + (o)))

#define rte_crypto_op_ctophys_offset(c, o) \
	(rte_iova_t)((c)->phys_addr + (o))
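
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * rte_crypto_op_ctod_offset() is commonly used to reach per-op private data
 * laid out after the op and its symmetric op. IV_OFFSET here is an
 * application-chosen layout, shown only as an assumption.
 */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static inline uint8_t *
example_op_iv_ptr(struct rte_crypto_op *op)
{
	/* Virtual address of the IV stored in the op's private area. */
	return rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
}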

/** Crypto parameters range description */
struct rte_crypto_param_range {
	uint16_t min;	/**< Minimum size */
	uint16_t max;	/**< Maximum size */
	uint16_t increment;
	/**< Supported increment in byte size between min and max;
	 * 0 means only a single size is supported.
	 */
};
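
/*
 * Illustrative sketch (editor's example): testing whether a given size falls
 * inside a rte_crypto_param_range. With increment == 0 the range collapses
 * to the single value min (== max).
 */
static inline int
example_param_range_check(const struct rte_crypto_param_range *r, uint16_t sz)
{
	if (r->increment == 0)
		return sz == r->min;
	return sz >= r->min && sz <= r->max &&
		(sz - r->min) % r->increment == 0;
}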

#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES	RTE_BIT32(0)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES	RTE_BIT32(1)
#define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES	RTE_BIT32(2)

/** Symmetric crypto capability */
struct rte_cryptodev_symmetric_capability {
	enum rte_crypto_sym_xform_type xform_type;
	/**< Transform type: authentication / cipher / AEAD */
	union {
		struct {
			enum rte_crypto_auth_algorithm algo;
			uint16_t block_size;
			struct rte_crypto_param_range key_size;
			struct rte_crypto_param_range digest_size;
			struct rte_crypto_param_range aad_size;
			struct rte_crypto_param_range iv_size;
		} auth;
		/**< Symmetric authentication transform capabilities */
		struct {
			enum rte_crypto_cipher_algorithm algo;
			uint16_t block_size;
			struct rte_crypto_param_range key_size;
			struct rte_crypto_param_range iv_size;
			uint32_t dataunit_set;
			/**< Bitmap of supported data-unit lengths
			 * (RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_* flags);
			 * 0 when data-unit lengths are not supported.
			 */
		} cipher;
		/**< Symmetric cipher transform capabilities */
		struct {
			enum rte_crypto_aead_algorithm algo;
			uint16_t block_size;
			struct rte_crypto_param_range key_size;
			struct rte_crypto_param_range digest_size;
			struct rte_crypto_param_range aad_size;
			struct rte_crypto_param_range iv_size;
		} aead;
		/**< AEAD transform capabilities */
	};
};

/** Asymmetric xform crypto capability */
struct rte_cryptodev_asymmetric_xform_capability {
	enum rte_crypto_asym_xform_type xform_type;
	/**< Transform type: RSA / MODEXP / DH / DSA / MODINV ... */
	uint32_t op_types;
	/**< Bitmask of supported rte_crypto_asym_op_type values */
	__extension__
	union {
		struct rte_crypto_param_range modlen;
		/**< Range of modulus lengths supported by a modulus-based
		 * xform; value 0 means implementation default.
		 */
		uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END];
		/**< Per-operation capability flags */
		uint32_t mlkem_capa[RTE_CRYPTO_ML_KEM_OP_END];
		/**< ML-KEM per-operation capability flags */
		uint32_t mldsa_capa[RTE_CRYPTO_ML_DSA_OP_END];
		/**< ML-DSA per-operation capability flags */
	};

	uint64_t hash_algos;
	/**< Bitmask of hash algorithms supported for the op types */
};

/** Asymmetric crypto capability */
struct rte_cryptodev_asymmetric_capability {
	struct rte_cryptodev_asymmetric_xform_capability xform_capa;
};

/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
	enum rte_crypto_op_type op;
	/**< Operation type */
	union {
		struct rte_cryptodev_symmetric_capability sym;
		/**< Symmetric operation capability parameters */
		struct rte_cryptodev_asymmetric_capability asym;
		/**< Asymmetric operation capability parameters */
	};
};

/** Structure used to describe crypto algorithms */
struct rte_cryptodev_sym_capability_idx {
	enum rte_crypto_sym_xform_type type;
	union {
		enum rte_crypto_cipher_algorithm cipher;
		enum rte_crypto_auth_algorithm auth;
		enum rte_crypto_aead_algorithm aead;
	} algo;
};

/** Structure used to describe asymmetric crypto xforms;
 * each xform maps to one asymmetric algorithm.
 */
struct rte_cryptodev_asym_capability_idx {
	enum rte_crypto_asym_xform_type type;
	/**< Asymmetric xform (algo) type */
};

const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
	const struct rte_cryptodev_sym_capability_idx *idx);

const struct rte_cryptodev_asymmetric_xform_capability *
rte_cryptodev_asym_capability_get(uint8_t dev_id,
	const struct rte_cryptodev_asym_capability_idx *idx);

int
rte_cryptodev_sym_capability_check_cipher(
	const struct rte_cryptodev_symmetric_capability *capability,
	uint16_t key_size, uint16_t iv_size);

int
rte_cryptodev_sym_capability_check_auth(
	const struct rte_cryptodev_symmetric_capability *capability,
	uint16_t key_size, uint16_t digest_size, uint16_t iv_size);

int
rte_cryptodev_sym_capability_check_aead(
	const struct rte_cryptodev_symmetric_capability *capability,
	uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
	uint16_t iv_size);
int
rte_cryptodev_asym_xform_capability_check_optype(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type);

int
rte_cryptodev_asym_xform_capability_check_modlen(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	uint16_t modlen);

bool
rte_cryptodev_asym_xform_capability_check_hash(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_auth_algorithm hash);

__rte_experimental
int
rte_cryptodev_asym_xform_capability_check_opcap(
	const struct rte_cryptodev_asymmetric_xform_capability *capability,
	enum rte_crypto_asym_op_type op_type, uint8_t cap);

int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
	const char *algo_string);

int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
	const char *algo_string);

int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
	const char *algo_string);

int
rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
	const char *xform_string);

__rte_experimental
const char *
rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum);

__rte_experimental
const char *
rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum);

__rte_experimental
const char *
rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum);

__rte_experimental
const char *
rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum);

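/*
 * Illustrative sketch (editor's example): round-tripping a cipher algorithm
 * between its string and enum forms, e.g. for command-line parsing and log
 * messages. Returns the printable name, or NULL if the string is unknown.
 */
static const char *
example_parse_and_name(const char *user_string)
{
	enum rte_crypto_cipher_algorithm algo;

	if (rte_cryptodev_get_cipher_algo_enum(&algo, user_string) < 0)
		return NULL;	/* unrecognised algorithm string */

	return rte_cryptodev_get_cipher_algo_string(algo);
}
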
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }

#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO		(1ULL << 0)
#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO		(1ULL << 1)
#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING		(1ULL << 2)
#define RTE_CRYPTODEV_FF_CPU_SSE			(1ULL << 3)
#define RTE_CRYPTODEV_FF_CPU_AVX			(1ULL << 4)
#define RTE_CRYPTODEV_FF_CPU_AVX2			(1ULL << 5)
#define RTE_CRYPTODEV_FF_CPU_AESNI			(1ULL << 6)
#define RTE_CRYPTODEV_FF_HW_ACCELERATED			(1ULL << 7)
#define RTE_CRYPTODEV_FF_CPU_AVX512			(1ULL << 8)
#define RTE_CRYPTODEV_FF_IN_PLACE_SGL			(1ULL << 9)
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 10)
#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT		(1ULL << 11)
#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT		(1ULL << 12)
#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT		(1ULL << 13)
#define RTE_CRYPTODEV_FF_CPU_NEON			(1ULL << 14)
#define RTE_CRYPTODEV_FF_CPU_ARM_CE			(1ULL << 15)
#define RTE_CRYPTODEV_FF_SECURITY			(1ULL << 16)
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP		(1ULL << 17)
#define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT		(1ULL << 18)
#define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED		(1ULL << 19)
#define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS		(1ULL << 20)
#define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO			(1ULL << 21)
#define RTE_CRYPTODEV_FF_SYM_SESSIONLESS		(1ULL << 22)
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA		(1ULL << 23)
#define RTE_CRYPTODEV_FF_SYM_RAW_DP			(1ULL << 24)
#define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS	(1ULL << 25)
#define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY		(1ULL << 26)
#define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM		(1ULL << 27)
#define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT		(1ULL << 28)
#define RTE_CRYPTODEV_FF_MLDSA_SIGN_PREHASH		(1ULL << 29)
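
/*
 * Illustrative sketch (editor's example): testing a device's feature flags
 * before choosing a data path. dev_info.feature_flags is filled by
 * rte_cryptodev_info_get().
 */
static int
example_supports_sgl_out_of_place(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT) != 0;
}
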
const char *
rte_cryptodev_get_feature_name(uint64_t flag);

/** Crypto device information */
/* Structure rte_cryptodev_info 8< */
struct rte_cryptodev_info {
	const char *driver_name;	/**< Driver name */
	uint8_t driver_id;		/**< Driver identifier */
	struct rte_device *device;	/**< Generic device information */

	uint64_t feature_flags;
	/**< Feature flags exposing HW/SW features for the given device */
	const struct rte_cryptodev_capabilities *capabilities;
	/**< Array of devices supported capabilities */

	unsigned max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by device */

	uint16_t min_mbuf_headroom_req;
	/**< Minimum mbuf headroom required by device */

	uint16_t min_mbuf_tailroom_req;
	/**< Minimum mbuf tailroom required by device */

	struct {
		unsigned max_nb_sessions;
		/**< Maximum number of sessions supported by device;
		 * 0 means the device does not limit the number of sessions.
		 */
	} sym;
};
/* >8 End of structure rte_cryptodev_info. */

#define RTE_CRYPTODEV_DETACHED  (0)
#define RTE_CRYPTODEV_ATTACHED  (1)

/** Definitions of crypto device event types */
enum rte_cryptodev_event_type {
	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< Unknown event type */
	RTE_CRYPTODEV_EVENT_ERROR,	/**< Error interrupt event */
	RTE_CRYPTODEV_EVENT_MAX		/**< Max value of this enum */
};

/* Crypto queue pair priority levels */
#define RTE_CRYPTODEV_QP_PRIORITY_HIGHEST   0
/**< Highest priority of a crypto device queue pair */
#define RTE_CRYPTODEV_QP_PRIORITY_NORMAL    128
/**< Normal priority of a crypto device queue pair */
#define RTE_CRYPTODEV_QP_PRIORITY_LOWEST    255
/**< Lowest priority of a crypto device queue pair */

/** Crypto device queue pair configuration structure. */
/* Structure rte_cryptodev_qp_conf 8< */
struct rte_cryptodev_qp_conf {
	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
	uint8_t priority;
	/**< Priority of this queue pair (RTE_CRYPTODEV_QP_PRIORITY_*);
	 * used only when the device supports queue pair priority.
	 */
	struct rte_mempool *mp_session;
	/**< The mempool for creating sessions in sessionless mode */
};
/* >8 End of structure rte_cryptodev_qp_conf. */

/**
 * Function type for user-supplied callbacks invoked around
 * enqueue/dequeue bursts on a queue pair.
 */
typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);

/**
 * Typedef for application callback function registered by an application
 * to be notified of device events.
 */
typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
		enum rte_cryptodev_event_type event, void *cb_arg);

/** Crypto device statistics */
struct rte_cryptodev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};

#define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
/**< Max length of name of crypto PMD */

int
rte_cryptodev_get_dev_id(const char *name);

const char *
rte_cryptodev_name_get(uint8_t dev_id);

uint8_t
rte_cryptodev_count(void);

uint8_t
rte_cryptodev_device_count_by_driver(uint8_t driver_id);

uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
/*
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id to which the device is connected, or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
int
rte_cryptodev_socket_id(uint8_t dev_id);

/** Crypto device configuration structure */
/* Structure rte_cryptodev_config 8< */
struct rte_cryptodev_config {
	int socket_id;		/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Number of queue pairs to configure on device */
	uint64_t ff_disable;
	/**< Feature flags to be disabled. Only the following features are
	 * allowed to be disabled:
	 *  - RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO
	 *  - RTE_CRYPTODEV_FF_SECURITY
	 */
};
/* >8 End of structure rte_cryptodev_config. */

int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);

int
rte_cryptodev_start(uint8_t dev_id);

void
rte_cryptodev_stop(uint8_t dev_id);

int
rte_cryptodev_close(uint8_t dev_id);

int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
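
/*
 * Illustrative sketch (editor's example): the usual device bring-up
 * sequence. All sizes and ids are arbitrary example values; error handling
 * is compressed for brevity.
 */
static int
example_dev_setup(uint8_t dev_id, struct rte_mempool *session_pool)
{
	struct rte_cryptodev_config conf = {
		.socket_id = rte_cryptodev_socket_id(dev_id),
		.nb_queue_pairs = 1,
		.ff_disable = 0,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,		/* example ring depth */
		.mp_session = session_pool,	/* for sessionless ops; may be NULL */
	};

	if (rte_cryptodev_configure(dev_id, &conf) < 0)
		return -1;
	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
			conf.socket_id) < 0)
		return -1;
	return rte_cryptodev_start(dev_id);
}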

__rte_experimental
int
rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);

int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);

uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id);

int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);

void
rte_cryptodev_stats_reset(uint8_t dev_id);

void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);

int
rte_cryptodev_callback_register(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg);

int
rte_cryptodev_callback_unregister(uint8_t dev_id,
		enum rte_cryptodev_event_type event,
		rte_cryptodev_cb_fn cb_fn, void *cb_arg);

__rte_experimental
int
rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);

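/*
 * Illustrative sketch (editor's example): reacting to device error events.
 * The callback typically runs outside the data-path thread, so it only
 * records the error here.
 */
static volatile int example_dev_error;

static void
example_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
		void *cb_arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(cb_arg);
	if (event == RTE_CRYPTODEV_EVENT_ERROR)
		example_dev_error = 1;
}

/* Registration:
 * rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
 *		example_event_cb, NULL);
 */
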
struct rte_cryptodev_callback;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);

/**
 * Structure used to hold information about the callbacks to be called for a
 * queue pair on enqueue/dequeue.
 */
struct rte_cryptodev_cb {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback */
	rte_cryptodev_callback_fn fn;
	/**< Pointer to callback function */
	void *arg;
	/**< Pointer to argument */
};

/**
 * Structure used to hold information about the RCU for a queue pair.
 */
struct rte_cryptodev_cb_rcu {
	RTE_ATOMIC(struct rte_cryptodev_cb *) next;
	/**< Pointer to next callback */
	struct rte_rcu_qsbr *qsbr;
	/**< RCU QSBR variable per queue pair */
};

void *
rte_cryptodev_get_sec_ctx(uint8_t dev_id);

struct rte_mempool *
rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
	int socket_id);

struct rte_mempool *
rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
	uint32_t cache_size, uint16_t user_data_size, int socket_id);

void *
rte_cryptodev_sym_session_create(uint8_t dev_id,
	struct rte_crypto_sym_xform *xforms,
	struct rte_mempool *mp);

int
rte_cryptodev_asym_session_create(uint8_t dev_id,
	struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
	void **session);

int
rte_cryptodev_sym_session_free(uint8_t dev_id,
	void *sess);

int
rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
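
/*
 * Illustrative sketch (editor's example): size a session mempool from the
 * device's reported private session size, then create a session from a
 * caller-provided xform chain. Pool sizing values are arbitrary examples.
 */
static void *
example_session_create(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		int socket_id)
{
	struct rte_mempool *pool;
	unsigned int elt_size = rte_cryptodev_sym_get_private_session_size(dev_id);

	pool = rte_cryptodev_sym_session_pool_create("example_sess_pool",
			1024,		/* number of sessions */
			elt_size,	/* per-element size hint */
			32,		/* per-lcore cache */
			0,		/* no per-session user data */
			socket_id);
	if (pool == NULL)
		return NULL;

	/* Free later with rte_cryptodev_sym_session_free(dev_id, sess). */
	return rte_cryptodev_sym_session_create(dev_id, xform, pool);
}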

unsigned int
rte_cryptodev_asym_get_header_session_size(void);

unsigned int
rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);

unsigned int
rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);

unsigned int
rte_cryptodev_is_valid_dev(uint8_t dev_id);

int
rte_cryptodev_driver_id_get(const char *name);

const char *rte_cryptodev_driver_name_get(uint8_t driver_id);

int
rte_cryptodev_sym_session_set_user_data(void *sess,
	void *data,
	uint16_t size);

#define CRYPTO_SESS_OPAQUE_DATA_OFF 0
/**
 * Get opaque data from session handle
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
}

/**
 * Set opaque data in session handle
 */
static inline void
rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
{
	uint64_t *data;
	data = (((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF);
	*data = opaque;
}

void *
rte_cryptodev_sym_session_get_user_data(void *sess);

int
rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);

void *
rte_cryptodev_asym_session_get_user_data(void *sess);

uint32_t
rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
	void *sess, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_sym_vec *vec);

int
rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);

int
rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
	enum rte_crypto_op_type op_type,
	enum rte_crypto_op_sess_type sess_type,
	void *ev_mdata, uint16_t size);

/** Union of different crypto session types, including a session-less xform
 * pointer.
 */
union rte_cryptodev_session_ctx {
	void *crypto_sess;
	struct rte_crypto_sym_xform *xform;
	struct rte_security_session *sec_sess;
};

typedef uint32_t (*cryptodev_sym_raw_enqueue_burst_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
	union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);

typedef int (*cryptodev_sym_raw_enqueue_t)(
	void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data);

typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
	uint32_t n);

typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);

typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
	uint32_t index, uint8_t is_op_success);

typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
	uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status);

typedef void * (*cryptodev_sym_raw_dequeue_t)(
	void *qp, uint8_t *drv_ctx, int *dequeue_status,
	enum rte_crypto_op_status *op_status);

/**
 * Context data for raw data-path API crypto process. The buffer for this
 * structure is to be allocated by the user application with a size equal
 * to or bigger than the value returned by rte_cryptodev_get_raw_dp_ctx_size().
 */
struct rte_crypto_raw_dp_ctx {
	void *qp_data;

	cryptodev_sym_raw_enqueue_t enqueue;
	cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
	cryptodev_sym_raw_operation_done_t enqueue_done;
	cryptodev_sym_raw_dequeue_t dequeue;
	cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
	cryptodev_sym_raw_operation_done_t dequeue_done;

	/* Driver specific context data */
	uint8_t drv_ctx_data[];
};

int
rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx,
	uint8_t is_update);

uint32_t
rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **user_data, int *enqueue_status);

__rte_experimental
static __rte_always_inline int
rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx,
	struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	return ctx->enqueue(ctx->qp_data, ctx->drv_ctx_data, data_vec,
		n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
}

int
rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n);

uint32_t
rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status);

__rte_experimental
static __rte_always_inline void *
rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx,
	int *dequeue_status, enum rte_crypto_op_status *op_status)
{
	return ctx->dequeue(ctx->qp_data, ctx->drv_ctx_data, dequeue_status, op_status);
}

int
rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
	uint32_t n);
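
/*
 * Illustrative sketch (editor's example): allocating a raw data-path context
 * of the size the PMD reports and binding it to an existing session.
 * Needs <stdlib.h> for calloc()/free(); enqueue/dequeue of the actual
 * vectors is elided.
 */
static struct rte_crypto_raw_dp_ctx *
example_raw_dp_bind(uint8_t dev_id, uint16_t qp_id, void *sess)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);

	if (sz < 0)
		return NULL;	/* raw data path not supported */

	ctx = calloc(1, sz);
	if (ctx == NULL)
		return NULL;

	/* is_update = 0: first binding of this session to the context */
	if (rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0) < 0) {
		free(ctx);
		return NULL;
	}
	return ctx;
}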

struct rte_cryptodev_cb *
rte_cryptodev_add_enq_callback(uint8_t dev_id,
		uint16_t qp_id,
		rte_cryptodev_callback_fn cb_fn,
		void *cb_arg);

int
rte_cryptodev_remove_enq_callback(uint8_t dev_id,
		uint16_t qp_id,
		struct rte_cryptodev_cb *cb);

struct rte_cryptodev_cb *
rte_cryptodev_add_deq_callback(uint8_t dev_id,
		uint16_t qp_id,
		rte_cryptodev_callback_fn cb_fn,
		void *cb_arg);

int
rte_cryptodev_remove_deq_callback(uint8_t dev_id,
		uint16_t qp_id,
		struct rte_cryptodev_cb *cb);

#ifdef __cplusplus
}
#endif

#include "rte_cryptodev_core.h"

#ifdef __cplusplus
extern "C" {
#endif

static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];

	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);

#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* rte_memory_order_release memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory
		 * order is not required.
		 */
		list = &fp_ops->qp.deq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif
	return nb_ops;
}

static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];
#ifdef RTE_CRYPTO_CALLBACKS
	if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
		struct rte_cryptodev_cb_rcu *list;
		struct rte_cryptodev_cb *cb;

		/* rte_memory_order_release memory order was used when the
		 * callback was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory
		 * order is not required.
		 */
		list = &fp_ops->qp.enq_cb[qp_id];
		rte_rcu_qsbr_thread_online(list->qsbr, 0);
		cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);

		while (cb != NULL) {
			nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
					cb->arg);
			cb = cb->next;
		}

		rte_rcu_qsbr_thread_offline(list->qsbr, 0);
	}
#endif

	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
	return fp_ops->enqueue_burst(qp, ops, nb_ops);
}
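
/*
 * Illustrative sketch (editor's example): the classic poll-mode loop around
 * the burst APIs. ops[] is assumed to be fully populated (session attached,
 * mbufs set) and the device is assumed to complete every op eventually.
 */
static inline void
example_burst_loop(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent = 0, recvd = 0;
	struct rte_crypto_op *done[32];

	while (sent < nb_ops)
		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + sent, nb_ops - sent);

	while (recvd < nb_ops)
		recvd += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				done, RTE_DIM(done));
	/* Check each done[i]->status == RTE_CRYPTO_OP_STATUS_SUCCESS. */
}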

__rte_experimental
static inline int
rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
{
	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;
	int rc;

	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];

	if (fp_ops->qp_depth_used == NULL) {
		rc = -ENOTSUP;
		goto out;
	}

	rc = fp_ops->qp_depth_used(qp);
out:
	rte_cryptodev_trace_qp_depth_used(dev_id, qp_id);
	return rc;
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTODEV_H_ */