#include <stddef.h>
#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"
/*
 * True when the SA type 't' produces an outbound IPv4 packet:
 * either transport mode carrying an IPv4 payload, or any SA whose
 * tunnel outer header is IPv4.
 */
#define SATP_OUT_IPV4(t) \
((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
/*
 * Helper: free a bulk of packet mbufs.
 * mb - array of mbuf pointers to release
 * n  - number of entries in mb[]
 */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	/* body was missing: return each mbuf to its mempool */
	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);
}
static inline void
{
uint32_t i;
for (i = 0; i != n; i++)
}
/*
 * Enqueue a bulk of crypto ops to the crypto-dev queue pair.
 * Ops may be buffered in cqp->buf until the buffer fills; ops the PMD
 * fails to accept are dropped (freed together with their packets).
 */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straightaway.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		/* drop ops that we failed to enqueue */
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		/* how many ops fit into the buffer this round */
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(n, num - k);

		/* put ops into cqp buffer */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		len += n;
		k += n;

		/* if cqp is full then, enqueue crypto-ops to PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			/* drop ops that we failed to enqueue */
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}
	} while (k != num);

	cqp->len = len;
}
static inline int
struct ipsec_sa *sa)
{
int32_t rc;
RTE_ASSERT(ss->
crypto.ses == NULL);
rc = create_lookaside_session(ctx, sa, ss);
if (rc != 0)
return rc;
RTE_ASSERT(ss->
security.ses == NULL);
rc = create_lookaside_session(ctx, sa, ss);
if (rc != 0)
return rc;
} else
RTE_ASSERT(0);
if (rc != 0)
memset(ss, 0, sizeof(*ss));
return rc;
}
static uint32_t
sa_group(
void *sa_ptr[],
struct rte_mbuf *pkts[],
{
uint32_t i, n, spi;
void *sa;
void * const nosa = &spi;
sa = nosa;
for (i = 0, n = 0; i != num; i++) {
if (sa != sa_ptr[i]) {
grp[n].
cnt = pkts + i - grp[n].
m;
n += (sa != nosa);
grp[n].
id.
ptr = sa_ptr[i];
sa = sa_ptr[i];
}
}
if (sa != nosa) {
grp[n].
cnt = pkts + i - grp[n].
m;
n++;
}
return n;
}
static inline void
copy_to_trf(
struct ipsec_traffic *trf, uint64_t satp,
struct rte_mbuf *mb[],
uint32_t num)
{
uint32_t j, ofs, s;
struct traffic_type *out;
if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
out = &trf->ip4;
} else {
out = &trf->ip6;
ofs =
offsetof(
struct ip6_hdr, ip6_nxt);
}
} else if (SATP_OUT_IPV4(satp)) {
out = &trf->ip4;
} else {
out = &trf->ip6;
ofs =
offsetof(
struct ip6_hdr, ip6_nxt);
}
for (j = 0, s = out->num; j != num; j++) {
void *, ofs);
out->pkts[s + j] = mb[j];
}
out->num += num;
}
/*
 * Prepare crypto ops for a group of packets belonging to one SA and
 * enqueue them to the SA's crypto-dev queue pair.
 * Returns the number of packets successfully prepared.
 */
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips, struct rte_mbuf **m,
	unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = &ctx->tbl[sa->cdev_id_qp];

	/* for that app each mbuf has it's own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check()
		 * should be removed in future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}
/*
 * Stamp the owning SA pointer into the private metadata of each packet;
 * needed so that inbound_sa_check()/get_hop_for_offload_pkt() work for
 * inline and cpu-crypto paths.
 */
static inline void
prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint32_t idx = 0;

	while (idx != cnt) {
		struct ipsec_mbuf_metadata *md = get_priv(mb[idx]);

		md->sa = sa;
		idx++;
	}
}
static uint32_t
struct ipsec_traffic *trf,
struct rte_mbuf *mb[], uint32_t cnt)
{
uint64_t satp;
uint32_t k;
prep_process_group(sa, mb, cnt);
copy_to_trf(trf, satp, mb, k);
return k;
}
static uint32_t
struct ipsec_traffic *trf,
struct rte_mbuf *mb[], uint32_t cnt)
{
uint64_t satp;
uint32_t k;
prep_process_group(sa, mb, cnt);
k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
copy_to_trf(trf, satp, mb, k);
return k;
}
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
uint32_t i, k, n;
struct ipsec_sa *sa;
n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
for (i = 0; i != n; i++) {
pg = grp + i;
sa = ipsec_mask_saptr(pg->
id.ptr);
if (sa != NULL)
ips = (pg->
id.
val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
ipsec_get_fallback_session(sa) :
ipsec_get_primary_session(sa);
if (sa == NULL || (ips->crypto.ses == NULL &&
fill_ipsec_session(ips, ctx, sa) != 0))
k = 0;
else {
k = ipsec_prepare_crypto_group(ctx, sa, ips,
break;
k = ipsec_process_inline_group(ips, sa,
break;
k = ipsec_process_cpu_group(ips, sa,
break;
default:
k = 0;
}
}
free_pkts(pg->
m + k, pg->
cnt - k);
}
}
/*
 * Dequeue up to 'num' completed crypto ops from one queue pair.
 * Returns the number of ops dequeued.
 */
static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	/* nothing outstanding on this qp */
	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}
/*
 * Dequeue completed crypto ops from all queue pairs of the context.
 * Queue pairs are visited round-robin, starting right after the one
 * where the previous call stopped, so no qp is starved.
 */
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t qp, cnt;

	cnt = 0;

	/* first sweep: from the saved position up to the last qp */
	for (qp = ctx->last_qp; cnt != num && qp != ctx->nb_qps; qp++)
		cnt += cqp_dequeue(ctx->tbl + qp, cop + cnt, num - cnt);

	/* wrap around: from qp 0 up to the saved position */
	for (qp = 0; cnt != num && qp != ctx->last_qp; qp++)
		cnt += cqp_dequeue(ctx->tbl + qp, cop + cnt, num - cnt);

	/* remember where to resume next time */
	ctx->last_qp = qp;
	return cnt;
}
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
uint64_t satp;
uint32_t i, k, n, ng;
struct traffic_type *out;
trf->ip4.num = 0;
trf->ip6.num = 0;
out = &trf->ipsec;
n = ctx_dequeue(ctx, cop,
RTE_DIM(cop));
if (n == 0)
return;
(uintptr_t)cop, out->pkts, grp, n);
for (i = 0; i != ng; i++) {
pg = grp + i;
copy_to_trf(trf, satp, pg->
m, k);
free_pkts(pg->
m + k, pg->
cnt - k);
}
RTE_VERIFY(n == 0);
}