/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32
/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32
/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)
/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256
/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)
/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256
/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
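/*
 * Editorial sketch (not part of the header): a worked example of the
 * DIR-24-8 index arithmetic implied by the sizes above and used by the
 * lookup routines later in this file. The function name is hypothetical.
 */
static inline void
example_dir24_8_indices(uint32_t ip, uint32_t group_index)
{
	/* tbl24 is indexed by the top 24 bits of the IPv4 address,
	 * hence its (1 << 24) entries.
	 */
	unsigned int tbl24_index = ip >> 8;

	/* A tbl24 entry that extends to a tbl8 stores a group index; the
	 * low 8 bits of the address select the entry inside that
	 * 256-entry group.
	 */
	unsigned int tbl8_index = group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES +
			(uint8_t)ip;

	/* e.g. ip = 0x0A000001 (10.0.0.1): tbl24_index = 0x0A0000. */
	(void)tbl24_index;
	(void)tbl8_index;
}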
/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
/** @internal bitmask with valid and valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/** @internal Default RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24/tbl8 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
	union {
		RTE_ATOMIC(uint32_t) val; /**< The whole entry as one atomic word. */
		struct {
			/** Next hop, or a tbl8 group index when valid_group
			 * is set on a tbl24 entry.
			 */
			uint32_t next_hop    :24;
			uint32_t valid       :1; /**< Validation flag. */
			/** tbl24: entry extends to a tbl8 group;
			 * tbl8: the group is in use.
			 */
			uint32_t valid_group :1;
			uint32_t depth       :6; /**< Rule depth. */
		};
	};
};

#else

/* Big endian: the same fields in reverse order. */
__extension__
struct rte_lpm_tbl_entry {
	union {
		RTE_ATOMIC(uint32_t) val;
		struct {
			uint32_t depth       :6;
			uint32_t valid_group :1;
			uint32_t valid       :1;
			uint32_t next_hop    :24;
		};
	};
};

#endif
static_assert(sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t),
	"sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t)");
/** @internal LPM structure. */
struct rte_lpm {
	/* LPM Tables. */
	alignas(RTE_CACHE_LINE_SIZE) struct rte_lpm_tbl_entry
			tbl24[RTE_LPM_TBL24_NUM_ENTRIES]; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
};
/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v; /**< RCU QSBR variable. */
	/** Reclamation mode, RTE_LPM_QSBR_MODE_*;
	 * '0' for default: create defer queue for reclaim.
	 */
	enum rte_lpm_qsbr_mode mode;
	uint32_t dq_size;	/**< RCU defer queue size;
				 * default: lpm->number_tbl8s.
				 */
	uint32_t reclaim_thd;	/**< Threshold to trigger auto reclaim. */
	uint32_t reclaim_max;	/**< Max entries to reclaim in one go;
				 * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
				 */
};
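/*
 * Editorial sketch (not part of the header): wiring an RCU QSBR variable
 * into an LPM table with rte_lpm_rcu_qsbr_add() so freed tbl8 groups are
 * reclaimed safely. The helper name is hypothetical; `v` must already be
 * initialized with rte_rcu_qsbr_init().
 */
static inline int
example_lpm_enable_rcu(struct rte_lpm *lpm, struct rte_rcu_qsbr *v)
{
	struct rte_lpm_rcu_config cfg = {
		.v = v,
		.mode = RTE_LPM_QSBR_MODE_DQ, /* reclaim through a defer queue */
		/* dq_size, reclaim_thd and reclaim_max left at 0 to take
		 * the documented defaults.
		 */
	};

	return rte_lpm_rcu_qsbr_add(lpm, &cfg);
}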
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
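/*
 * Editorial sketch (not part of the header): typical control-path setup
 * with rte_lpm_create() and rte_lpm_add(). The table name and sizing are
 * illustrative; RTE_IPV4() comes from <rte_ip.h>.
 */
static inline struct rte_lpm *
example_lpm_build(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0, /* currently unused */
	};
	struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, &config);

	if (lpm == NULL)
		return NULL;

	/* 10.0.0.0/8 -> next hop 1 (an opaque 24-bit value). */
	if (rte_lpm_add(lpm, RTE_IPV4(10, 0, 0, 0), 8, 1) < 0) {
		rte_lpm_free(lpm);
		return NULL;
	}
	return lpm;
}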
/**
 * Lookup an IP into the LPM table.
 *
 * @return
 *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
 */
static inline int
rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Memory ordering is not required in lookup: the address dependency
	 * between the two loads keeps compiler and hardware from reordering
	 * them.
	 */
	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
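/*
 * Editorial sketch (not part of the header): data-path use of
 * rte_lpm_lookup(). The helper name is hypothetical.
 */
static inline uint32_t
example_route(const struct rte_lpm *lpm, uint32_t ip, uint32_t default_hop)
{
	uint32_t next_hop;

	/* 0 on hit, -ENOENT on miss (or -EINVAL with debug checks on). */
	if (rte_lpm_lookup(lpm, ip, &next_hop) == 0)
		return next_hop;
	return default_hop;
}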
/**
 * Lookup multiple IP addresses in an LPM table. The next hop of the most
 * specific rule found for each IP is written to next_hops[], with the
 * RTE_LPM_LOOKUP_SUCCESS flag set on a hit.
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		unsigned int tbl24_index = ips[i] >> 8;

		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_index];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
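/*
 * Editorial sketch (not part of the header): consuming bulk results.
 * Unlike rte_lpm_lookup(), the bulk variant stores raw table entries, so
 * the caller tests RTE_LPM_LOOKUP_SUCCESS and masks out the 24-bit next
 * hop itself. The helper name is hypothetical.
 */
static inline void
example_bulk_route(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *hops, unsigned n, uint32_t default_hop)
{
	unsigned i;

	rte_lpm_lookup_bulk(lpm, ips, hops, n);
	for (i = 0; i < n; i++)
		hops[i] = (hops[i] & RTE_LPM_LOOKUP_SUCCESS) ?
				(hops[i] & 0x00FFFFFF) : default_hop;
}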
/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
#if defined(RTE_ARCH_ARM)
#ifdef RTE_HAS_SVE_ACLE
#include "rte_lpm_sve.h"
#undef rte_lpm_lookup_bulk
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		__rte_lpm_lookup_vec(lpm, ips, next_hops, n)
#endif
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#elif defined(RTE_ARCH_X86)
#include "rte_lpm_sse.h"
#else
#include "rte_lpm_scalar.h"
#endif
/* Remaining public API declared by this header: */

struct rte_lpm *
rte_lpm_find_existing(const char *name);

void
rte_lpm_free(struct rte_lpm *lpm);

struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
	__rte_malloc __rte_dealloc(rte_lpm_free, 1);

int
rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);

int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);

int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

void
rte_lpm_delete_all(struct rte_lpm *lpm);

/* Implemented by the per-architecture headers included above: */
static void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
		uint32_t defv);
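/*
 * Editorial sketch (not part of the header): control-path teardown using
 * the declarations above. The helper name is hypothetical; RTE_IPV4()
 * comes from <rte_ip.h>.
 */
static inline void
example_lpm_teardown(struct rte_lpm *lpm)
{
	uint32_t hop;

	/* rte_lpm_is_rule_present() returns 1 when the exact rule exists. */
	if (rte_lpm_is_rule_present(lpm, RTE_IPV4(10, 0, 0, 0), 8, &hop) == 1)
		rte_lpm_delete(lpm, RTE_IPV4(10, 0, 0, 0), 8);

	rte_lpm_delete_all(lpm); /* drop any remaining rules */
	rte_lpm_free(lpm);       /* release the table itself */
}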