/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016-2020 Intel Corporation
*/
/*
* Security Associations
*/
#include <stdlib.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"
#define IPDEFTTL 64
#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
#define IP6_FULL_MASK RTE_IPV6_MAX_DEPTH
#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
struct supported_cipher_algo {
const char *keyword;
uint16_t iv_len;
uint16_t block_size;
uint16_t key_len;
};
struct supported_auth_algo {
const char *keyword;
uint16_t iv_len;
uint16_t digest_len;
uint16_t key_len;
uint8_t key_not_req;
};
struct supported_aead_algo {
const char *keyword;
uint16_t iv_len;
uint16_t block_size;
uint16_t digest_len;
uint16_t key_len;
uint8_t aad_len;
};
const struct supported_cipher_algo cipher_algos[] = {
{
.keyword = "null",
.iv_len = 0,
.block_size = 4,
.key_len = 0
},
{
.keyword = "aes-128-cbc",
.iv_len = 16,
.block_size = 16,
.key_len = 16
},
{
.keyword = "aes-192-cbc",
.iv_len = 16,
.block_size = 16,
.key_len = 24
},
{
.keyword = "aes-256-cbc",
.iv_len = 16,
.block_size = 16,
.key_len = 32
},
{
.keyword = "aes-128-ctr",
/* Per packet IV length */
.iv_len = 8,
.block_size = 4,
.key_len = 20
},
{
.keyword = "aes-192-ctr",
.iv_len = 16,
.block_size = 16,
.key_len = 28
},
{
.keyword = "aes-256-ctr",
.iv_len = 16,
.block_size = 16,
.key_len = 36
},
{
.keyword = "3des-cbc",
.iv_len = 8,
.block_size = 8,
.key_len = 24
},
{
.keyword = "des-cbc",
.iv_len = 8,
.block_size = 8,
.key_len = 8
}
};
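/*
 * Note on the CTR entries above: key_len covers the cipher key plus a
 * trailing 4-byte nonce supplied in the config line (e.g. 20 = 16B
 * AES-128 key + 4B nonce); parse_sa_tokens() below strips those last
 * 4 bytes into rule->salt and shrinks cipher_key_len accordingly.
 */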
const struct supported_auth_algo auth_algos[] = {
{
.keyword = "null",
.digest_len = 0,
.key_len = 0,
.key_not_req = 1
},
{
.keyword = "sha1-hmac",
.digest_len = 12,
.key_len = 20
},
{
.keyword = "sha256-hmac",
.digest_len = 16,
.key_len = 32
},
{
.keyword = "sha384-hmac",
.digest_len = 24,
.key_len = 48
},
{
.keyword = "sha512-hmac",
.digest_len = 32,
.key_len = 64
},
{
.keyword = "aes-gmac",
.iv_len = 8,
.digest_len = 16,
.key_len = 20
},
{
.keyword = "aes-xcbc-mac-96",
.digest_len = 12,
.key_len = 16
}
};
const struct supported_aead_algo aead_algos[] = {
{
.keyword = "aes-128-gcm",
.iv_len = 8,
.block_size = 4,
.key_len = 20,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "aes-192-gcm",
.iv_len = 8,
.block_size = 4,
.key_len = 28,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "aes-256-gcm",
.iv_len = 8,
.block_size = 4,
.key_len = 36,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "aes-128-ccm",
.iv_len = 8,
.block_size = 4,
.key_len = 20,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "aes-192-ccm",
.iv_len = 8,
.block_size = 4,
.key_len = 28,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "aes-256-ccm",
.iv_len = 8,
.block_size = 4,
.key_len = 36,
.digest_len = 16,
.aad_len = 8,
},
{
.keyword = "chacha20-poly1305",
.iv_len = 12,
.block_size = 64,
.key_len = 36,
.digest_len = 16,
.aad_len = 8,
}
};
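/*
 * Likewise for the AEAD table (and aes-gmac in the auth table):
 * key_len includes a trailing 4-byte salt (e.g. 20 = 16B AES-128-GCM
 * key + 4B salt), which parse_sa_tokens() splits off into rule->salt.
 */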
#define SA_INIT_NB 128
static uint32_t nb_crypto_sessions;
struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;
struct ipsec_sa *sa_in;
uint32_t nb_sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
size_t i;
for (i = 0; i < RTE_DIM(cipher_algos); i++) {
const struct supported_cipher_algo *algo =
&cipher_algos[i];
if (strcmp(cipher_keyword, algo->keyword) == 0)
return algo;
}
return NULL;
}
static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
size_t i;
for (i = 0; i < RTE_DIM(auth_algos); i++) {
const struct supported_auth_algo *algo =
&auth_algos[i];
if (strcmp(auth_keyword, algo->keyword) == 0)
return algo;
}
return NULL;
}
static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
size_t i;
for (i = 0; i < RTE_DIM(aead_algos); i++) {
const struct supported_aead_algo *algo =
&aead_algos[i];
if (strcmp(aead_keyword, algo->keyword) == 0)
return algo;
}
return NULL;
}
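/*
 * Usage sketch (illustrative, not part of the upstream file):
 *
 *	const struct supported_cipher_algo *algo =
 *		find_match_cipher_algo("aes-128-cbc");
 *	if (algo != NULL)
 *		printf("iv=%u block=%u key=%u\n",
 *			algo->iv_len, algo->block_size, algo->key_len);
 *
 * All three lookup helpers return NULL for an unknown keyword.
 */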
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
const char *pt_start = key_str, *pt_end = key_str;
uint32_t nb_bytes = 0;
while (pt_end != NULL) {
char sub_str[3] = {0};
pt_end = strchr(pt_start, ':');
if (pt_end == NULL) {
if (strlen(pt_start) > 2)
return 0;
strncpy(sub_str, pt_start, 2);
} else {
if (pt_end - pt_start > 2)
return 0;
strncpy(sub_str, pt_start, pt_end - pt_start);
pt_start = pt_end + 1;
}
key[nb_bytes++] = strtol(sub_str, NULL, 16);
}
return nb_bytes;
}
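/*
 * Example (illustrative): parse_key_string() turns a colon-separated
 * hex string into raw bytes and returns the byte count, or 0 on error
 * (a group longer than two hex digits):
 *
 *	uint8_t key[MAX_KEY_SIZE];
 *	uint32_t len = parse_key_string("de:ad:be:ef", key);
 *	// len == 4, key[] == {0xde, 0xad, 0xbe, 0xef}
 *
 * MAX_KEY_SIZE is assumed from ipsec.h; any sufficiently large buffer
 * works, as the function does not bound-check the destination.
 */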
static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
if (*sa_tbl == NULL) {
*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
if (*sa_tbl == NULL)
return -1;
*cur_sz = SA_INIT_NB;
return 0;
}
if (cur_cnt >= *cur_sz) {
struct ipsec_sa *tmp;
tmp = realloc(*sa_tbl,
*cur_sz * sizeof(struct ipsec_sa) * 2);
/* on failure keep the original table intact */
if (tmp == NULL)
return -1;
*sa_tbl = tmp;
/* clean reallocated extra space */
memset(&(*sa_tbl)[*cur_sz], 0,
*cur_sz * sizeof(struct ipsec_sa));
*cur_sz *= 2;
}
}
return 0;
}
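/*
 * Usage sketch: parse_sa_tokens() below grows the table on demand
 * before writing rule number nb_sa_in:
 *
 *	if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) == 0)
 *		rule = &sa_in[nb_sa_in];
 */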
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
struct parse_status *status)
{
struct ipsec_sa *rule = NULL;
struct rte_ipsec_session *ips;
uint32_t ti; /*token index*/
uint32_t *ri /*rule index*/;
struct ipsec_sa_cnt *sa_cnt;
uint32_t cipher_algo_p = 0;
uint32_t auth_algo_p = 0;
uint32_t aead_algo_p = 0;
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
uint32_t type_p = 0;
uint32_t portid_p = 0;
uint32_t fallback_p = 0;
int16_t status_p = 0;
uint16_t udp_encap_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
sa_cnt = &sa_in_cnt;
if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
return;
rule = &sa_in[*ri];
} else {
ri = &nb_sa_out;
sa_cnt = &sa_out_cnt;
if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
return;
rule = &sa_out[*ri];
}
/* spi number */
APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
if (status->status < 0)
return;
if (atoi(tokens[1]) == INVALID_SPI)
return;
rule->flags = 0;
rule->spi = atoi(tokens[1]);
rule->portid = UINT16_MAX;
ips = ipsec_get_primary_session(rule);
for (ti = 2; ti < n_tokens; ti++) {
if (strcmp(tokens[ti], "mode") == 0) {
APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
sa_cnt->nb_v4++;
rule->flags |= IP4_TUNNEL;
} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
sa_cnt->nb_v6++;
rule->flags |= IP6_TUNNEL;
} else if (strcmp(tokens[ti], "transport") == 0) {
sa_cnt->nb_v4++;
sa_cnt->nb_v6++;
rule->flags |= TRANSPORT;
} else {
APP_CHECK(0, status, "unrecognized "
"input \"%s\"", tokens[ti]);
return;
}
mode_p = 1;
continue;
}
if (strcmp(tokens[ti], "telemetry") == 0) {
rule->flags |= SA_TELEMETRY_ENABLE;
continue;
}
if (strcmp(tokens[ti], "cipher_algo") == 0) {
const struct supported_cipher_algo *algo;
uint32_t key_len;
APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
algo = find_match_cipher_algo(tokens[ti]);
APP_CHECK(algo != NULL, status, "unrecognized "
"input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
rule->cipher_algo = algo->algo;
rule->block_size = algo->block_size;
rule->iv_len = algo->iv_len;
rule->cipher_key_len = algo->key_len;
/* for NULL algorithm, no cipher key required */
if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
cipher_algo_p = 1;
continue;
}
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
status, "unrecognized input \"%s\", "
"expect \"cipher_key\"", tokens[ti]);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
key_len = parse_key_string(tokens[ti],
rule->cipher_key);
APP_CHECK(key_len == rule->cipher_key_len, status,
"unrecognized input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
algo->algo == RTE_CRYPTO_CIPHER_DES_CBC)
rule->salt = (uint32_t)rte_rand();
if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
key_len -= 4;
rule->cipher_key_len = key_len;
memcpy(&rule->salt,
&rule->cipher_key[key_len], 4);
}
cipher_algo_p = 1;
continue;
}
if (strcmp(tokens[ti], "auth_algo") == 0) {
const struct supported_auth_algo *algo;
uint32_t key_len;
APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
algo = find_match_auth_algo(tokens[ti]);
APP_CHECK(algo != NULL, status, "unrecognized "
"input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
rule->auth_algo = algo->algo;
rule->auth_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
/* NULL algorithm and combined algos do not
* require auth key
*/
if (algo->key_not_req) {
auth_algo_p = 1;
continue;
}
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
status, "unrecognized input \"%s\", "
"expect \"auth_key\"", tokens[ti]);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
key_len = parse_key_string(tokens[ti],
rule->auth_key);
APP_CHECK(key_len == rule->auth_key_len, status,
"unrecognized input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
key_len -= 4;
rule->auth_key_len = key_len;
rule->iv_len = algo->iv_len;
memcpy(&rule->salt,
&rule->auth_key[key_len], 4);
}
auth_algo_p = 1;
continue;
}
if (strcmp(tokens[ti], "aead_algo") == 0) {
const struct supported_aead_algo *algo;
uint32_t key_len;
APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
algo = find_match_aead_algo(tokens[ti]);
APP_CHECK(algo != NULL, status, "unrecognized "
"input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
rule->aead_algo = algo->algo;
rule->cipher_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
rule->aad_len = algo->aad_len;
rule->block_size = algo->block_size;
rule->iv_len = algo->iv_len;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
status, "unrecognized input \"%s\", "
"expect \"aead_key\"", tokens[ti]);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
key_len = parse_key_string(tokens[ti],
rule->cipher_key);
APP_CHECK(key_len == rule->cipher_key_len, status,
"unrecognized input \"%s\"", tokens[ti]);
if (status->status < 0)
return;
key_len -= 4;
rule->cipher_key_len = key_len;
memcpy(&rule->salt,
&rule->cipher_key[key_len], 4);
aead_algo_p = 1;
continue;
}
if (strcmp(tokens[ti], "src") == 0) {
APP_CHECK_PRESENCE(src_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (IS_IP4_TUNNEL(rule->flags)) {
struct in_addr ip;
APP_CHECK(parse_ipv4_addr(tokens[ti],
&ip, NULL) == 0, status,
"unrecognized input \"%s\", "
"expect valid ipv4 addr",
tokens[ti]);
if (status->status < 0)
return;
rule->src.ip.ip4 = rte_bswap32(
(uint32_t)ip.s_addr);
} else if (IS_IP6_TUNNEL(rule->flags)) {
struct rte_ipv6_addr ip;
APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
NULL) == 0, status,
"unrecognized input \"%s\", "
"expect valid ipv6 addr",
tokens[ti]);
if (status->status < 0)
return;
rule->src.ip.ip6 = ip;
} else if (IS_TRANSPORT(rule->flags)) {
APP_CHECK(0, status, "unrecognized input "
"\"%s\"", tokens[ti]);
return;
}
src_p = 1;
continue;
}
if (strcmp(tokens[ti], "dst") == 0) {
APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (IS_IP4_TUNNEL(rule->flags)) {
struct in_addr ip;
APP_CHECK(parse_ipv4_addr(tokens[ti],
&ip, NULL) == 0, status,
"unrecognized input \"%s\", "
"expect valid ipv4 addr",
tokens[ti]);
if (status->status < 0)
return;
rule->dst.ip.ip4 = rte_bswap32(
(uint32_t)ip.s_addr);
} else if (IS_IP6_TUNNEL(rule->flags)) {
struct rte_ipv6_addr ip;
APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
NULL) == 0, status,
"unrecognized input \"%s\", "
"expect valid ipv6 addr",
tokens[ti]);
if (status->status < 0)
return;
rule->dst.ip.ip6 = ip;
} else if (IS_TRANSPORT(rule->flags)) {
APP_CHECK(0, status, "unrecognized "
"input \"%s\"", tokens[ti]);
return;
}
dst_p = 1;
continue;
}
if (strcmp(tokens[ti], "type") == 0) {
APP_CHECK_PRESENCE(type_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
ips->type =
else if (strcmp(tokens[ti],
"inline-protocol-offload") == 0)
ips->type =
else if (strcmp(tokens[ti],
"lookaside-protocol-offload") == 0)
ips->type =
else if (strcmp(tokens[ti], "no-offload") == 0)
else if (strcmp(tokens[ti], "cpu-crypto") == 0)
else {
APP_CHECK(0, status, "Invalid input \"%s\"",
tokens[ti]);
return;
}
type_p = 1;
continue;
}
if (strcmp(tokens[ti], "port_id") == 0) {
APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (rule->portid == UINT16_MAX)
rule->portid = atoi(tokens[ti]);
else if (rule->portid != atoi(tokens[ti])) {
APP_CHECK(0, status,
"portid %s not matching with already assigned portid %u",
tokens[ti], rule->portid);
return;
}
portid_p = 1;
continue;
}
if (strcmp(tokens[ti], "mss") == 0) {
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
rule->mss = atoi(tokens[ti]);
if (status->status < 0)
return;
continue;
}
if (strcmp(tokens[ti], "reassembly_en") == 0) {
rule->flags |= SA_REASSEMBLY_ENABLE;
continue;
}
if (strcmp(tokens[ti], "esn") == 0) {
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
rule->esn = atoll(tokens[ti]);
if (status->status < 0)
return;
continue;
}
if (strcmp(tokens[ti], "fallback") == 0) {
struct rte_ipsec_session *fb;
APP_CHECK(app_sa_prm.enable, status, "Fallback session "
"not allowed for legacy mode.");
if (status->status < 0)
return;
APP_CHECK(ips->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
"Fallback session allowed if primary session "
"is of type inline-crypto-offload only.");
if (status->status < 0)
return;
APP_CHECK(rule->direction ==
RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
"Fallback session not allowed for egress "
"rule");
if (status->status < 0)
return;
APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
fb = ipsec_get_fallback_session(rule);
if (strcmp(tokens[ti], "lookaside-none") == 0)
else if (strcmp(tokens[ti], "cpu-crypto") == 0)
else {
APP_CHECK(0, status, "unrecognized fallback "
"type %s.", tokens[ti]);
return;
}
rule->fallback_sessions = 1;
nb_crypto_sessions++;
fallback_p = 1;
continue;
}
if (strcmp(tokens[ti], "flow-direction") == 0) {
switch (ips->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
rule->fdir_flag = 1;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (rule->portid == UINT16_MAX)
rule->portid = atoi(tokens[ti]);
else if (rule->portid != atoi(tokens[ti])) {
APP_CHECK(0, status,
"portid %s not matching with already assigned portid %u",
tokens[ti], rule->portid);
return;
}
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
rule->fdir_qid = atoi(tokens[ti]);
/* validating portid and queueid */
status_p = check_flow_params(rule->portid,
rule->fdir_qid);
if (status_p < 0) {
printf("port id %u / queue id %u is "
"not valid\n", rule->portid,
rule->fdir_qid);
}
break;
default:
APP_CHECK(0, status,
"flow director not supported for security session type %d",
ips->type);
return;
}
continue;
}
if (strcmp(tokens[ti], "udp-encap") == 0) {
switch (ips->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
status);
if (status->status < 0)
return;
rule->udp_encap = 1;
app_sa_prm.udp_encap = 1;
udp_encap_p = 1;
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
rule->udp_encap = 1;
rule->udp.sport = 0;
rule->udp.dport = 4500;
break;
default:
APP_CHECK(0, status,
"UDP encapsulation not supported for "
"security session type %d",
ips->type);
return;
}
continue;
}
/* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
}
if (aead_algo_p) {
APP_CHECK(cipher_algo_p == 0, status,
"AEAD used, no need for cipher options");
if (status->status < 0)
return;
APP_CHECK(auth_algo_p == 0, status,
"AEAD used, no need for auth options");
if (status->status < 0)
return;
} else {
APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
if (status->status < 0)
return;
APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
if (status->status < 0)
return;
}
APP_CHECK(mode_p == 1, status, "missing mode option");
if (status->status < 0)
return;
if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
printf("Missing portid option, falling back to non-offload\n");
if (!type_p || (!portid_p && ips->type !=
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
}
if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
wrkr_flags |= INL_CR_F;
else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
wrkr_flags |= INL_PR_F;
else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
wrkr_flags |= LA_PR_F;
else
wrkr_flags |= LA_ANY_F;
nb_crypto_sessions++;
*ri = *ri + 1;
}
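/*
 * Illustrative SA lines in the format this parser accepts (cf. the
 * ipsec-secgw user guide; the leading "sa" keyword is consumed by the
 * top-level config parser, keys are shortened with "..." and lines are
 * wrapped here for brevity):
 *
 *	sa out 5 cipher_algo aes-128-cbc cipher_key a1:b2:...:f0
 *		auth_algo sha1-hmac auth_key a1:b2:...:f0
 *		mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 *	sa in 105 aead_algo aes-128-gcm aead_key de:ad:...:ef
 *		mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
 */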
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
uint32_t i;
uint8_t a, b, c, d;
const struct rte_ipsec_session *ips;
const struct rte_ipsec_session *fallback_ips;
printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
for (i = 0; i < RTE_DIM(cipher_algos); i++) {
if (cipher_algos[i].algo == sa->cipher_algo &&
cipher_algos[i].key_len == sa->cipher_key_len) {
printf("%s ", cipher_algos[i].keyword);
break;
}
}
for (i = 0; i < RTE_DIM(auth_algos); i++) {
if (auth_algos[i].algo == sa->auth_algo) {
printf("%s ", auth_algos[i].keyword);
break;
}
}
for (i = 0; i < RTE_DIM(aead_algos); i++) {
if (aead_algos[i].algo == sa->aead_algo &&
aead_algos[i].key_len-4 == sa->cipher_key_len) {
printf("%s ", aead_algos[i].keyword);
break;
}
}
printf("mode:");
if (sa->udp_encap)
printf("UDP encapsulated ");
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
printf("IP4Tunnel ");
uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
break;
case IP6_TUNNEL:
printf("IP6Tunnel ");
printf(RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&sa->src.ip.ip6));
printf(" ");
printf(RTE_IPV6_ADDR_FMT, RTE_IPV6_ADDR_SPLIT(&sa->dst.ip.ip6));
break;
case TRANSPORT:
printf("Transport ");
break;
}
ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
printf(" type:");
switch (ips->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
printf("no-offload ");
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
printf("inline-crypto-offload ");
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
printf("inline-protocol-offload ");
break;
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
printf("lookaside-protocol-offload ");
break;
case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
printf("cpu-crypto-accelerated ");
break;
}
fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
if (fallback_ips != NULL && sa->fallback_sessions > 0) {
printf("inline fallback: ");
switch (fallback_ips->type) {
case RTE_SECURITY_ACTION_TYPE_NONE:
printf("lookaside-none");
break;
case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
printf("cpu-crypto-accelerated");
break;
default:
printf("invalid");
break;
}
}
if (sa->fdir_flag == 1)
printf("flow-direction port %d queue %d", sa->portid,
sa->fdir_qid);
printf("\n");
}
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
char s[PATH_MAX];
struct sa_ctx *sa_ctx;
uint32_t mz_size;
const struct rte_memzone *mz;
snprintf(s, sizeof(s), "%s_%u", name, socket_id);
/* Create SA context */
printf("Creating SA context with %u maximum entries on socket %d\n",
nb_sa, socket_id);
mz_size = sizeof(struct ipsec_xf) * nb_sa;
mz = rte_memzone_reserve(s, mz_size, socket_id,
RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
if (mz == NULL) {
printf("Failed to allocate SA XFORM memory\n");
rte_errno = ENOMEM;
return NULL;
}
sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
if (sa_ctx == NULL) {
printf("Failed to allocate SA CTX memory\n");
rte_errno = ENOMEM;
return NULL;
}
sa_ctx->xf = (struct ipsec_xf *)mz->addr;
sa_ctx->nb_sa = nb_sa;
return sa_ctx;
}
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
{
struct rte_eth_dev_info dev_info;
int retval;
retval = rte_eth_dev_info_get(portid, &dev_info);
if (retval != 0) {
RTE_LOG(ERR, IPSEC,
"Error during getting device (port %u) info: %s\n",
portid, strerror(-retval));
return retval;
}
if (inbound) {
if ((dev_info.rx_offload_capa &
RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, IPSEC,
"hardware RX IPSec offload is not supported\n");
return -EINVAL;
}
} else { /* outbound */
if ((dev_info.tx_offload_capa &
RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, IPSEC,
"hardware TX IPSec offload is not supported\n");
return -EINVAL;
}
if (tso && (dev_info.tx_offload_capa &
RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
RTE_LOG(WARNING, IPSEC,
"hardware TCP TSO offload is not supported\n");
return -EINVAL;
}
}
return 0;
}
/*
* Helper function, tries to determine next_proto for SPI
* by searching through SP rules.
*/
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
struct ip_addr ip_addr[2], uint32_t mask[2])
{
int32_t rc4, rc6;
rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
ip_addr, mask);
rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
ip_addr, mask);
if (rc4 >= 0) {
if (rc6 >= 0) {
RTE_LOG(ERR, IPSEC,
"%s: SPI %u used simultaneously by "
"IPv4(%d) and IPv6 (%d) SP rules\n",
__func__, spi, rc4, rc6);
return -EINVAL;
} else
return IPPROTO_IPIP;
} else if (rc6 < 0) {
RTE_LOG(ERR, IPSEC,
"%s: SPI %u is not used by any SP rule\n",
__func__, spi);
return -EINVAL;
} else
return IPPROTO_IPV6;
}
/*
* Helper function for getting source and destination IP addresses
* from SP. Needed for inline crypto transport mode, as addresses are not
* provided in the config file for that mode. It checks whether an SP for the
* current SA exists and, based on the protocol returned, stores the
* appropriate addresses retrieved from the SP into the SA.
*/
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
int protocol;
struct ip_addr ip_addr[2];
uint32_t mask[2];
protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
if (protocol < 0)
return protocol;
else if (protocol == IPPROTO_IPIP) {
sa->flags |= IP4_TRANSPORT;
if (mask[0] == IP4_FULL_MASK &&
mask[1] == IP4_FULL_MASK &&
ip_addr[0].ip.ip4 != 0 &&
ip_addr[1].ip.ip4 != 0) {
sa->src.ip.ip4 = ip_addr[0].ip.ip4;
sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
} else {
RTE_LOG(ERR, IPSEC,
"%s: No valid address or mask entry in"
" IPv4 SP rule for SPI %u\n",
__func__, sa->spi);
return -EINVAL;
}
} else if (protocol == IPPROTO_IPV6) {
sa->flags |= IP6_TRANSPORT;
if (mask[0] == IP6_FULL_MASK &&
mask[1] == IP6_FULL_MASK &&
!rte_ipv6_addr_is_unspec(&ip_addr[0].ip.ip6) &&
!rte_ipv6_addr_is_unspec(&ip_addr[1].ip.ip6)) {
sa->src.ip.ip6 = ip_addr[0].ip.ip6;
sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
} else {
RTE_LOG(ERR, IPSEC,
"%s: No valid address or mask entry in"
" IPv6 SP rule for SPI %u\n",
__func__, sa->spi);
return -EINVAL;
}
}
return 0;
}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, uint32_t inbound,
struct socket_ctx *skt_ctx,
struct ipsec_ctx *ips_ctx[],
const struct eventmode_conf *em_conf)
{
struct ipsec_sa *sa;
uint32_t i, idx;
uint16_t iv_length, aad_length;
uint16_t auth_iv_length = 0;
int inline_status;
int32_t rc;
struct rte_ipsec_session *ips;
/* for ESN upper 32 bits of SQN also need to be part of AAD */
aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
for (i = 0; i < nb_entries; i++) {
idx = i;
sa = &sa_ctx->sa[idx];
if (sa->spi != 0) {
printf("Index %u already in use by SPI %u\n",
idx, sa->spi);
return -EINVAL;
}
*sa = entries[i];
if (inbound) {
rc = ipsec_sad_add(&sa_ctx->sad, sa);
if (rc != 0)
return rc;
}
sa->seq = 0;
ips = ipsec_get_primary_session(sa);
if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
return -EINVAL;
}
switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
break;
case TRANSPORT:
if (ips->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
inline_status =
sa_add_address_inline_crypto(sa);
if (inline_status < 0)
return inline_status;
}
break;
}
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
iv_length = 11;
else
iv_length = 12;
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
sa_ctx->xf[idx].a.aead.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
RTE_CRYPTO_AEAD_OP_DECRYPT :
RTE_CRYPTO_AEAD_OP_ENCRYPT;
sa_ctx->xf[idx].a.next = NULL;
sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.aead.iv.length = iv_length;
sa_ctx->xf[idx].a.aead.aad_length =
sa->aad_len + aad_length;
sa_ctx->xf[idx].a.aead.digest_length =
sa->digest_len;
sa->xforms = &sa_ctx->xf[idx].a;
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_DES_CBC:
case RTE_CRYPTO_CIPHER_3DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CBC:
iv_length = sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
/* Length includes 8B per packet IV, 4B nonce and
 * 4B counter as populated in datapath.
 */
iv_length = 16;
break;
default:
RTE_LOG(ERR, IPSEC_ESP,
"unsupported cipher algorithm %u\n",
sa->cipher_algo);
return -EINVAL;
}
/* AES_GMAC uses salt like AEAD algorithms */
if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
auth_iv_length = 12;
if (inbound) {
sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
sa_ctx->xf[idx].b.cipher.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].b.cipher.op =
RTE_CRYPTO_CIPHER_OP_DECRYPT;
sa_ctx->xf[idx].b.next = NULL;
sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
sa_ctx->xf[idx].a.auth.key.length =
sa->auth_key_len;
sa_ctx->xf[idx].a.auth.digest_length =
sa->digest_len;
sa_ctx->xf[idx].a.auth.op =
RTE_CRYPTO_AUTH_OP_VERIFY;
sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.auth.iv.length = auth_iv_length;
} else { /* outbound */
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
sa_ctx->xf[idx].a.cipher.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].a.cipher.op =
RTE_CRYPTO_CIPHER_OP_ENCRYPT;
sa_ctx->xf[idx].a.next = NULL;
sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
sa_ctx->xf[idx].b.auth.key.length =
sa->auth_key_len;
sa_ctx->xf[idx].b.auth.digest_length =
sa->digest_len;
sa_ctx->xf[idx].b.auth.op =
RTE_CRYPTO_AUTH_OP_GENERATE;
sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].b.auth.iv.length = auth_iv_length;
}
if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
sa->xforms = inbound ?
&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
sa->xforms->next = NULL;
} else {
sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
sa_ctx->xf[idx].b.next = NULL;
sa->xforms = &sa_ctx->xf[idx].a;
}
}
if (ips->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
ips->type ==
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
rc = create_inline_session(skt_ctx, sa, ips);
if (rc != 0) {
RTE_LOG(ERR, IPSEC_ESP,
"create_inline_session() failed\n");
return -EINVAL;
}
} else {
rc = create_lookaside_session(ips_ctx, skt_ctx,
em_conf, sa, ips);
if (rc != 0) {
RTE_LOG(ERR, IPSEC_ESP,
"create_lookaside_session() failed\n");
return -EINVAL;
}
}
if (sa->fdir_flag && inbound) {
rc = create_ipsec_esp_flow(sa);
if (rc != 0)
RTE_LOG(ERR, IPSEC_ESP,
"create_ipsec_esp_flow() failed\n");
}
print_one_sa_rule(sa, inbound);
}
return 0;
}
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, struct socket_ctx *skt_ctx,
struct ipsec_ctx *ips_ctx[],
const struct eventmode_conf *em_conf)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
}
static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, struct socket_ctx *skt_ctx,
struct ipsec_ctx *ips_ctx[],
const struct eventmode_conf *em_conf)
{
return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
}
/*
* helper function, fills parameters that are identical for all SAs
*/
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
const struct app_sa_prm *app_prm)
{
memset(prm, 0, sizeof(*prm));
prm->flags = app_prm->flags;
prm->ipsec_xform.options.esn = app_prm->enable_esn;
prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
int32_t rc;
/*
* Try to get SPI next proto by searching that SPI in SPD.
* Probably not the optimal way, but nothing better seems
* available right now.
*/
rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
if (rc < 0)
return rc;
fill_ipsec_app_sa_prm(prm, &app_sa_prm);
prm->userdata = (uintptr_t)ss;
/* setup ipsec xform */
prm->ipsec_xform.spi = ss->spi;
prm->ipsec_xform.salt = ss->salt;
prm->ipsec_xform.direction = ss->direction;
prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
prm->ipsec_xform.options.udp_encap = ss->udp_encap;
prm->ipsec_xform.udp.dport = ss->udp.dport;
prm->ipsec_xform.udp.sport = ss->udp.sport;
if (ss->esn > 0) {
prm->ipsec_xform.options.esn = 1;
prm->ipsec_xform.esn.value = ss->esn;
}
if (IS_IP4_TUNNEL(ss->flags)) {
prm->tun.hdr_len = sizeof(*v4);
prm->tun.next_proto = rc;
prm->tun.hdr = v4;
} else if (IS_IP6_TUNNEL(ss->flags)) {
prm->tun.hdr_len = sizeof(*v6);
prm->tun.next_proto = rc;
prm->tun.hdr = v6;
} else {
/* transport mode */
prm->trs.proto = rc;
}
/* setup crypto section */
prm->crypto_xform = ss->xforms;
return 0;
}
static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
int32_t rc = 0;
ss->sa = sa;
rc = rte_ipsec_session_prepare(ss);
if (rc != 0)
memset(ss, 0, sizeof(*ss));
return rc;
}
/*
* Initialise related rte_ipsec_sa object.
*/
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
const struct eventmode_conf *em_conf)
{
int rc;
struct rte_ipsec_sa_prm prm;
struct rte_ipsec_session *ips;
struct rte_ipv4_hdr v4 = {
.version_ihl = IPVERSION << 4 |
sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
.time_to_live = IPDEFTTL,
.next_proto_id = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
.src_addr = lsa->src.ip.ip4,
.dst_addr = lsa->dst.ip.ip4,
};
struct rte_ipv6_hdr v6 = {
.vtc_flow = htonl(IP6_VERSION << 28),
.proto = lsa->udp_encap ? IPPROTO_UDP : IPPROTO_ESP,
};
if (IS_IP6_TUNNEL(lsa->flags)) {
v6.src_addr = lsa->src.ip.ip6;
v6.dst_addr = lsa->dst.ip.ip6;
}
rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
if (rc == 0)
rc = rte_ipsec_sa_init(sa, &prm, sa_size);
if (rc < 0)
return rc;
if (lsa->flags & SA_TELEMETRY_ENABLE)
rte_ipsec_telemetry_sa_add(sa);
/* init primary processing session */
ips = ipsec_get_primary_session(lsa);
rc = fill_ipsec_session(ips, sa);
if (rc != 0)
return rc;
/* init inline fallback processing session */
if (lsa->fallback_sessions == 1) {
struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
if (ipfs->security.ses == NULL) {
rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
if (rc != 0)
return rc;
}
rc = fill_ipsec_session(ipfs, sa);
}
return rc;
}
/*
* Allocate space and init rte_ipsec_sa structures,
* one per session.
*/
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
const struct eventmode_conf *em_conf)
{
int32_t rc, sz;
uint32_t i, idx;
size_t tsz;
struct rte_ipsec_sa *sa;
struct ipsec_sa *lsa;
struct rte_ipsec_sa_prm prm;
/* determine SA size */
idx = 0;
fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
sz = rte_ipsec_sa_size(&prm);
if (sz < 0) {
RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
"failed to determine SA size, error code: %d\n",
__func__, ctx, nb_ent, socket, sz);
return sz;
}
tsz = sz * nb_ent;
ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
if (ctx->satbl == NULL) {
RTE_LOG(ERR, IPSEC,
"%s(%p, %u, %d): failed to allocate %zu bytes\n",
__func__, ctx, nb_ent, socket, tsz);
return -ENOMEM;
}
rc = 0;
for (i = 0; i != nb_ent && rc == 0; i++) {
idx = i;
sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
lsa = ctx->sa + idx;
rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
}
return rc;
}
static int
sa_cmp(const void *p, const void *q)
{
uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
/* compare without subtraction, which can wrap for 32-bit SPIs */
return (spi1 < spi2) ? -1 : (spi1 > spi2);
}
/*
 * Binary-search the SA table (sorted by SPI via sa_sort_arr()) for an
 * SA with the given SPI; returns its index, or -ENOENT if absent.
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
uint32_t num;
struct ipsec_sa *sa;
struct ipsec_sa tmpl;
const struct ipsec_sa *sar;
sar = sa_ctx->sa;
if (inbound != 0)
num = nb_sa_in;
else
num = nb_sa_out;
tmpl.spi = spi;
sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
if (sa != NULL)
return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
return -ENOENT;
}
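/*
 * Usage sketch (illustrative): the SA tables must already be ordered by
 * sa_sort_arr() for the bsearch() above to be valid:
 *
 *	int idx = sa_spi_present(ctx->sa_in, spi, 1);
 *	if (idx < 0)
 *		RTE_LOG(ERR, IPSEC, "SPI %u has no matching SA rule\n", spi);
 */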
void
sa_init(struct socket_ctx *ctx, int32_t socket_id,
struct lcore_conf *lcore_conf,
const struct eventmode_conf *em_conf)
{
int32_t rc;
const char *name;
uint32_t lcore_id;
struct ipsec_ctx *ipsec_ctx[RTE_MAX_LCORE];
if (ctx == NULL)
rte_exit(EXIT_FAILURE, "NULL context.\n");
if (ctx->sa_in != NULL)
rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
"initialized\n", socket_id);
if (ctx->sa_out != NULL)
rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
"initialized\n", socket_id);
if (nb_sa_in > 0) {
name = "sa_in";
ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
if (ctx->sa_in == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
"context %s in socket %d\n", rte_errno,
name, socket_id);
rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
&sa_in_cnt);
if (rc != 0)
rte_exit(EXIT_FAILURE, "failed to init SAD\n");
RTE_LCORE_FOREACH(lcore_id)
ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
socket_id, ctx, ipsec_ctx, em_conf);
if (rc != 0)
rte_exit(EXIT_FAILURE,
"failed to init inbound SAs\n");
}
} else
RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
if (nb_sa_out > 0) {
name = "sa_out";
ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
if (ctx->sa_out == NULL)
rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
"context %s in socket %d\n", rte_errno,
name, socket_id);
RTE_LCORE_FOREACH(lcore_id)
ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);
if (app_sa_prm.enable != 0) {
rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
socket_id, ctx, ipsec_ctx, em_conf);
if (rc != 0)
rte_exit(EXIT_FAILURE,
"failed to init outbound SAs\n");
}
} else
RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
"specified\n");
}
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
struct ipsec_mbuf_metadata *priv;
struct ipsec_sa *sa;
priv = get_priv(m);
sa = priv->sa;
if (sa != NULL)
return (sa_ctx->sa[sa_idx].spi == sa->spi);
RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
return 0;
}
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
void *sa_arr[], uint16_t nb_pkts)
{
uint32_t i;
void *result_sa;
struct ipsec_sa *sa;
sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
/*
 * Mark the need for an inline offload fallback on the LSB of the SA
 * pointer. Thanks to the packet grouping mechanism used by
 * ipsec_process, packets marked for fallback processing will form a
 * separate group.
 *
 * Because the marked pointer is not safe to dereference, it is cast to
 * a generic pointer to prevent unintentional use. Use ipsec_mask_saptr
 * to recover a valid struct pointer.
 */
for (i = 0; i < nb_pkts; i++) {
if (sa_arr[i] == NULL)
continue;
result_sa = sa = sa_arr[i];
if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
sa->fallback_sessions > 0) {
uintptr_t intsa = (uintptr_t)sa;
intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
result_sa = (void *)intsa;
}
sa_arr[i] = result_sa;
}
}
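/*
 * Consumer-side sketch (illustrative): ipsec_mask_saptr() from ipsec.h
 * clears the flag bit again to recover a dereferenceable pointer:
 *
 *	struct ipsec_sa *sa = ipsec_mask_saptr(sa_arr[i]);
 *	int fallback = ((uintptr_t)sa_arr[i] &
 *		IPSEC_SA_OFFLOAD_FALLBACK_FLAG) != 0;
 */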
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
void *sa[], uint16_t nb_pkts)
{
uint32_t i;
for (i = 0; i < nb_pkts; i++)
sa[i] = &sa_ctx->sa[sa_idx[i]];
}
/*
* Select HW offloads to be used.
*/
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
uint64_t *tx_offloads, uint8_t *hw_reassembly)
{
struct ipsec_sa *rule;
uint32_t idx_sa;
enum rte_security_session_action_type rule_type;
struct rte_eth_dev_info dev_info;
int ret;
*rx_offloads = 0;
*tx_offloads = 0;
*hw_reassembly = 0;
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
rte_exit(EXIT_FAILURE,
"Error during getting device (port %u) info: %s\n",
port_id, strerror(-ret));
/* Check for inbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
rule = &sa_in[idx_sa];
rule_type = ipsec_get_action_type(rule);
if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
rule_type ==
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
if (IS_HW_REASSEMBLY_EN(rule->flags)) {
*hw_reassembly = 1;
}
}
/* Check for outbound rules that use offloads and use this port */
for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
rule = &sa_out[idx_sa];
rule_type = ipsec_get_action_type(rule);
if (rule->portid == port_id) {
switch (rule_type) {
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
/* Checksum offload is not needed for inline
 * protocol as all processing for outbound IPsec
 * packets is implicitly taken care of, and for
 * non-IPsec packets there is no need for
 * IPv4 checksum offload.
 */
*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
if (rule->mss)
*tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
if (rule->mss)
*tx_offloads |=
RTE_ETH_TX_OFFLOAD_TCP_TSO;
if (dev_info.tx_offload_capa &
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
*tx_offloads |=
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
break;
default:
/* Enable IPv4 checksum offload even if
 * one of the lookaside SAs is present.
 */
if (dev_info.tx_offload_capa &
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
break;
}
} else {
if (dev_info.tx_offload_capa &
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
}
}
return 0;
}
void
sa_sort_arr(void)
{
qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}
uint32_t
get_nb_crypto_sessions(void)
{
return nb_crypto_sessions;
}