#include <rte_compat.h>

/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with valid and valid_group fields set. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/** @internal Default RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores a next hop (tbl8, or tbl24 when valid_group is not set) or
	 * a group index pointing to a tbl8 structure (tbl24 only, when
	 * valid_group is set).
	 */
	uint32_t next_hop    :24;
	uint32_t valid       :1; /**< Validation flag. */
	/**
	 * For tbl24:
	 *  - valid_group == 0: entry stores a next hop
	 *  - valid_group == 1: entry stores a group_index pointing to a tbl8
	 * For tbl8:
	 *  - valid_group indicates whether the current tbl8 is in use or not
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6; /**< Rule depth. */
};

#else

/** @internal Tbl24 entry structure (big-endian field order). */
__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};

#endif
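/*
 * All four bit-fields pack into a single 32-bit word, which is why the
 * lookup routines below can fetch an entry with one aligned 32-bit read.
 * A minimal compile-time check an application could add (illustrative
 * only; RTE_BUILD_BUG_ON comes from rte_common.h):
 *
 * @code
 * RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != sizeof(uint32_t));
 * @endcode
 */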
/** LPM configuration structure. */
struct rte_lpm_config {
	uint32_t max_rules;      /**< Max number of rules. */
	uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
	int flags;               /**< This field is currently unused. */
};

/** @internal LPM structure. */
struct rte_lpm {
	/* LPM Tables. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
};
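/*
 * Sizing note: tbl24 holds 2^24 four-byte entries, a fixed
 * 16,777,216 * 4 B = 64 MB per LPM object, while tbl8 memory scales with
 * the configured group count (number_tbl8s * 256 entries * 4 B).
 */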
/** RCU reclamation modes. */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim. */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue. */
	RTE_LPM_QSBR_MODE_SYNC
};

/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v; /**< RCU QSBR variable. */
	/** Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx.
	 * Default '0': create defer queue for reclaim.
	 */
	enum rte_lpm_qsbr_mode mode;
	uint32_t dq_size;     /**< RCU defer queue size; default: lpm->number_tbl8s. */
	uint32_t reclaim_thd; /**< Threshold to trigger auto reclaim. */
	uint32_t reclaim_max; /**< Max entries to reclaim in one go;
			       * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
			       */
};
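/*
 * Usage sketch (illustrative, not part of this header): attach an RCU QSBR
 * variable so freed tbl8 groups are reclaimed safely. rte_zmalloc() is from
 * rte_malloc.h and the QSBR calls are from rte_rcu_qsbr.h; the reader-thread
 * count (RTE_MAX_LCORE) is an assumption for the example.
 *
 * @code
 * size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
 * struct rte_rcu_qsbr *v = rte_zmalloc("lpm_rcu", sz, RTE_CACHE_LINE_SIZE);
 * rte_rcu_qsbr_init(v, RTE_MAX_LCORE);
 *
 * struct rte_lpm_rcu_config rcu_cfg = { .v = v }; // mode 0: defer queue
 * if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *         return -1; // rte_errno holds the failure reason
 * @endcode
 */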
/**
 * Add a rule to the LPM table.
 *
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
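/*
 * Usage sketch (illustrative; the name, sizing and next hop id are
 * assumptions for the example): create a table and install a /24 route.
 * RTE_IPV4() comes from rte_ip.h.
 *
 * @code
 * struct rte_lpm_config cfg = {
 *         .max_rules = 1024,
 *         .number_tbl8s = 256,
 * };
 * struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &cfg);
 * if (lpm == NULL)
 *         return -1; // rte_errno holds the failure reason
 *
 * // 192.168.1.0/24 -> next hop id 5
 * rte_lpm_add(lpm, RTE_IPV4(192, 168, 1, 0), 24, 5);
 * @endcode
 */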
/**
 * Lookup an IP in the LPM table.
 *
 * @return
 *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
 */
static inline int
rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
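/*
 * Usage sketch (illustrative): look up a destination address. On a hit the
 * 24-bit next hop is returned through *next_hop; on a miss -ENOENT is
 * returned and the output is not meaningful.
 *
 * @code
 * uint32_t nh;
 * if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 1, 17), &nh) == 0)
 *         ; // forward using next hop id 'nh'
 * else
 *         ; // no matching rule: drop or use a default route
 * @endcode
 */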
/**
 * Lookup multiple IP addresses in an LPM table. This may be implemented as a
 * macro, so the address of the function should not be used.
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
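/*
 * Usage sketch (illustrative): unlike rte_lpm_lookup(), the bulk variant
 * writes the raw table entry for each address, so the caller tests
 * RTE_LPM_LOOKUP_SUCCESS per result and masks off the low 24 bits to get
 * the next hop. BURST is an assumption for the example.
 *
 * @code
 * #define BURST 8
 * uint32_t ips[BURST];  // filled with destination addresses
 * uint32_t hops[BURST];
 * unsigned j;
 *
 * rte_lpm_lookup_bulk(lpm, ips, hops, BURST);
 * for (j = 0; j < BURST; j++) {
 *         if (hops[j] & RTE_LPM_LOOKUP_SUCCESS)
 *                 ; // forward to (hops[j] & 0x00FFFFFF)
 *         else
 *                 ; // miss
 * }
 * @endcode
 */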
/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)

/**
 * Lookup four IP addresses in an LPM table.
 *
 * @param hop
 *   Next hop of the most specific rule found for each IP (valid on lookup
 *   hit only), defv otherwise
 * @param defv
 *   Default value to populate into corresponding element of hop[] array,
 *   if lookup fails
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
	uint32_t defv);

#if defined(RTE_ARCH_ARM)
#ifdef RTE_HAS_SVE_ACLE
#include "rte_lpm_sve.h"
#undef rte_lpm_lookup_bulk
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		__rte_lpm_lookup_vec(lpm, ips, next_hops, n)
#endif
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#elif defined(RTE_ARCH_X86)
#include "rte_lpm_sse.h"
#else
#include "rte_lpm_scalar.h"
#endif
/**
 * Create an LPM object.
 *
 * @return
 *   Handle to LPM object on success, NULL otherwise with rte_errno set
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config);

/**
 * Find an existing LPM object and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name);

/**
 * Free an LPM object.
 */
void
rte_lpm_free(struct rte_lpm *lpm);

/**
 * Associate an RCU QSBR variable with an LPM object.
 *
 * @return
 *   0 on success, 1 on error with rte_errno set
 */
__rte_experimental
int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);

/**
 * Check if a rule is present in the LPM table and provide its next hop.
 *
 * @return
 *   1 if the rule exists, 0 if it does not, a negative value on failure
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);

/**
 * Delete a rule from the LPM table.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/**
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm);
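/*
 * Teardown sketch (illustrative): remove a rule, then flush and free the
 * table once no lookups are in flight.
 *
 * @code
 * rte_lpm_delete(lpm, RTE_IPV4(192, 168, 1, 0), 24);
 * rte_lpm_delete_all(lpm);
 * rte_lpm_free(lpm);
 * @endcode
 */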