#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};
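/*
 * Classify the packet by Ethernet type and next protocol, and return a
 * pointer (*nlp) to the next-proto field so it can be fed to the SP ACL.
 */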
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	/* ethaddr_tbl[] (ipsec-secgw.h) holds the per-port src/dst MACs */
	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}
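/*
 * Stash the destination port and Tx queue in the mbuf so the event
 * device's internal Tx port can forward the packet without further
 * processing by this worker.
 */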
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}
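/*
 * Vector event attributes: mark the vector as coming from a single
 * port/queue while all mbufs agree, and invalidate the attribute as soon
 * as packets from different ports end up in the same vector.
 */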
static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}
static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	/* Vector attr is valid only when all packets share one port */
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}

	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}
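/*
 * Build the per-port session table for outbound driver mode. Only inline
 * protocol sessions are usable here; the first one found for a given port
 * wins, and SAs with out-of-range port ids are skipped with an error log.
 */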
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct port_drv_mode_data *data,
		uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
			RTE_LOG(ERR, IPSEC,
				"Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id >= than table size %d, %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}
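/*
 * Single-packet SP (security policy) lookup: returns 0 on DISCARD or a
 * NULL SP context; on success *sa_idx holds the SA index, or -1 for
 * BYPASS.
 */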
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}
static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}
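/*
 * Bulk SP lookup for inbound traffic already IPsec-processed inline: on a
 * PROTECT verdict, cross-check the SA stashed in the mbuf's security
 * dynfield against the SA the policy points to (SPI must match), dropping
 * packets that fail the check.
 */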
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	struct rte_mbuf *m;
	uint32_t i, j, res;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}
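/*
 * LPM routing helpers: look up the destination address of the packet and
 * return the egress port, or RTE_MAX_ETHPORTS when no route matches. The
 * rt_ctx handle is an rte_lpm/rte_lpm6 table underneath.
 */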
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}
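/*
 * Inbound (unprotected port) single-event path: verify any inline IPsec
 * result, run the SP check, validate the matched SA, then route and
 * forward. Returns PKT_FORWARDED or PKT_DROPPED.
 */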
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}
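/*
 * Route all classified traffic in a vector (plain IPv4/IPv6 plus packets
 * matched to an SA), compacting the surviving mbufs in place. Returns the
 * number of packets left in the vector.
 */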
static inline int
ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
{
	struct rte_ipsec_session *sess;
	uint32_t sa_idx, i, j = 0;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa))
			free_pkts(&pkt, 1);
		else {
			/* Else the packet has to be protected */
			sa = &(sa_ctx->sa[sa_idx]);
			/* Get IPsec session */
			sess = ipsec_get_primary_session(sa);
			/* Allow only inline protocol for now */
			if (unlikely(sess->type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
				free_pkts(&pkt, 1);
				continue;
			}
			rte_security_set_pkt_metadata(sess->security.ctx,
						sess->security.ses, pkt, NULL);

			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;
			update_mac_addrs(pkt, port_id);
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		}
	}

	return j;
}
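/*
 * Sort a packet into the per-type traffic bins (plain IPv4/IPv6) consumed
 * by the bulk SP checks; unsupported types are dropped here.
 */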
static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	default:
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
			   type);
		free_pkts(&pkt, 1);
		break;
	}
}
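/*
 * Vector event paths: classify each mbuf in the vector, run the bulk SP
 * checks, then route the survivors. In driver mode the outbound vector
 * path instead attaches the port's cached inline session to every packet.
 */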
static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	int i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}
static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				 struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}
static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
					  struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		/* Drop the packet if no session was found for the port */
		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}
static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec);

	if (likely(ret > 0)) {
		vec->nb_elem = ret;
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	} else {
		/* All packets were dropped; recycle the vector */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}
static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;

	pkt = vec->mbufs[0];

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (vec->nb_elem > 0)
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	else
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}
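/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode selected.
 */

/* Workers registered */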
#define IPSEC_EVENTMODE_WORKERS 2
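/*
 * Event mode worker.
 * Operating parameters: non-burst - Tx internal port - driver mode.
 * Outbound packets on a protected port are all sent through the port's
 * first inline session, without per-packet SP/SA lookup.
 */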
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	memset(data, 0, sizeof(data));

	/* Get core and socket IDs */
	lcore_id = rte_lcore_id();
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use the first session configured for a given port.
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC,
		" -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {
			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since Tx internal port is available, events can be
		 * directly enqueued to the adapter and it gets
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}
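/*
 * Event mode worker.
 * Operating parameters: non-burst - Tx internal port - app mode.
 * Full SP/SA processing is performed in software for every event.
 */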
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core and socket IDs */
	lcore_id = rte_lcore_id();
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC,
		" -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
						       &lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != 1)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since Tx internal port is available, events can be
		 * directly enqueued to the adapter and it gets
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}
int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		ipsec_eventmode_worker(conf);
	}

	return 0;
}