14 #include <sys/queue.h>
19 #include <rte_config.h>
/* Max number of characters in an LPM object name. */
29 #define RTE_LPM_NAMESIZE 32
/* IPv4 prefixes have depth 1..32 (depth == prefix length in bits). */
32 #define RTE_LPM_MAX_DEPTH 32
/* tbl24 has one entry per possible /24 prefix (indexed by ip >> 8 in the
 * lookup code below), hence 2^24 entries. */
35 #define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
/* Each tbl8 group resolves the last 8 bits of the address: 2^8 entries. */
38 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
/* Upper bound on the number of tbl8 groups a user config may request. */
41 #define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
/* Default number of tbl8 groups. */
44 #define RTE_LPM_TBL8_NUM_GROUPS 256
/* Total tbl8 entries = groups * entries-per-group. */
47 #define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
48 RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
/* Debug builds get argument-checking macros; release builds compile them
 * out so the hot lookup path carries no branch for parameter validation. */
51 #if defined(RTE_LIBRTE_LPM_DEBUG)
52 #define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
53 if (cond) return (retval); \
/* NOTE(review): the closing "} while (0)" and the "#else"/"#endif" lines
 * (original lines 54-58) are missing from this extract — confirm against
 * the full header before editing. */
56 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
/* Mask selecting the valid + valid_group/ext_entry flag bits of a table
 * entry (bits 24-25); used below to detect "valid AND extended" entries
 * that redirect into a tbl8 group. */
60 #define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
/* Bit 24 set in a returned entry word signals a successful lookup. */
63 #define RTE_LPM_LOOKUP_SUCCESS 0x01000000
/* Table entry layout: a 32-bit word of bitfields. Two variants are defined
 * so the flag bits land in the same physical bit positions regardless of
 * host byte order (bitfield allocation order is endianness-dependent). */
65 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian variant: next_hop first, flags in the top byte. */
68 struct rte_lpm_tbl_entry {
/* For a tbl24 entry this is either the next hop or, when the entry is
 * marked extended, the index of the tbl8 group (see lookup code below). */
74 uint32_t next_hop :24;
/* In tbl24: 1 means next_hop is a tbl8 group index; in tbl8: group valid. */
84 uint32_t valid_group :1;
/* Big-endian variant — fields declared in reverse order.
 * NOTE(review): several bitfields (valid, depth, ext_entry) from the full
 * header are missing in this extract; both structs are incomplete here. */
91 struct rte_lpm_tbl_entry {
93 uint32_t valid_group :1;
95 uint32_t next_hop :24;
/* A stored rule: maps a prefix to a next hop (fields elided in extract). */
109 struct rte_lpm_rule {
/* Per-depth bookkeeping for the rules table (fields elided in extract). */
115 struct rte_lpm_rule_info {
/* Main LPM structure (struct header line missing from this extract). */
/* Number of tbl8 groups allocated for this LPM instance. */
125 uint32_t number_tbl8s;
/* Direct-indexed table covering the top 24 address bits.
 * NOTE(review): the alignment attribute that should follow the ']' on the
 * original line 130 is not visible here — line appears truncated. */
129 struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
/* Dynamically allocated tbl8 groups (number_tbl8s * 256 entries). */
131 struct rte_lpm_tbl_entry *tbl8;
/* Flat table of user-added rules. */
132 struct rte_lpm_rule *rules_tbl;
/* Add a rule: route packets matching (ip, depth) to next_hop.
 * Returns 0 on success, negative errno-style value on failure.
 * (Return-type line of this prototype is missing from the extract.) */
197 rte_lpm_add(
struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
/* Body fragment of rte_lpm_lookup() — single-address lookup.
 * Algorithm: index tbl24 by the top 24 bits; if the entry is marked both
 * valid and extended, follow it into a tbl8 group indexed by the low 8
 * bits; the low 24 bits of the final entry are the next hop. */
257 unsigned tbl24_index = (ip >> 8);
259 const uint32_t *ptbl;
/* Debug-only parameter check; compiles to nothing in release builds. */
262 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
/* Read the whole bitfield entry as one 32-bit word so flags and next_hop
 * are fetched with a single load.
 * NOTE(review): this cast relies on struct rte_lpm_tbl_entry being exactly
 * 32 bits; it is also a strict-aliasing-exempt idiom DPDK uses throughout. */
265 ptbl = (
const uint32_t *)(&lpm->tbl24[tbl24_index]);
/* Entry both valid and extended -> next_hop field is a tbl8 group index. */
273 if (
unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
274 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
/* Final tbl8 slot = (group index * 256) + low 8 bits of the address. */
276 unsigned tbl8_index = (uint8_t)ip +
277 (((uint32_t)tbl_entry & 0x00FFFFFF) *
278 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
280 ptbl = (
const uint32_t *)&lpm->tbl8[tbl8_index];
/* Strip flag bits; low 24 bits of the entry are the next hop. */
284 *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
/* Public bulk-lookup entry point: thin macro alias for the inline
 * implementation below. */
308 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
309 rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
/* Bulk lookup: resolve n addresses in one call. Same two-level algorithm
 * as the scalar lookup; next_hops[i] receives the raw entry word (flags in
 * the top byte, next hop in the low 24 bits). The two separate loops let
 * the first pass issue all tbl24 index computations before the dependent
 * table loads of the second pass.
 * (Return-type line of this definition is missing from the extract.) */
312 rte_lpm_lookup_bulk_func(
const struct rte_lpm *lpm,
const uint32_t *ips,
313 uint32_t *next_hops,
const unsigned n)
/* NOTE(review): VLA sized by caller-supplied n — large n risks stack
 * overflow; acceptable here only because callers pass small burst sizes. */
316 unsigned tbl24_indexes[n];
317 const uint32_t *ptbl;
/* Debug-only parameter check; no-op in release builds. */
320 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
321 (next_hops == NULL)), -EINVAL);
/* Pass 1: compute all tbl24 indexes (top 24 bits of each address). */
323 for (i = 0; i < n; i++) {
324 tbl24_indexes[i] = ips[i] >> 8;
/* Pass 2: load entries; chase extended entries into their tbl8 group. */
327 for (i = 0; i < n; i++) {
/* Read the bitfield entry as one 32-bit word (same idiom as the
 * scalar lookup). */
329 ptbl = (
const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
330 next_hops[i] = *ptbl;
/* Valid + extended -> low 24 bits hold a tbl8 group index. */
333 if (
unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
334 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
/* tbl8 slot = group * 256 + low 8 address bits. */
336 unsigned tbl8_index = (uint8_t)ips[i] +
337 (((uint32_t)next_hops[i] & 0x00FFFFFF) *
338 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
340 ptbl = (
const uint32_t *)&lpm->tbl8[tbl8_index];
341 next_hops[i] = *ptbl;
/* Mask applied pairwise to two packed entries in the vectorized x4 lookup:
 * keeps the low 24 (next-hop) bits of each 32-bit lane. */
348 #define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
/* Pull in the per-architecture SIMD implementation of rte_lpm_lookupx4().
 * NOTE(review): the "#else" (and trailing "#endif") around the SSE include
 * is missing from this extract — original line 377 is not shown. */
373 #if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
374 #include "rte_lpm_neon.h"
375 #elif defined(RTE_ARCH_PPC_64)
376 #include "rte_lpm_altivec.h"
378 #include "rte_lpm_sse.h"
/* NOTE(review): the lines below look like an auto-generated symbol index
 * appended to this extract (bare signatures without terminating ';' and
 * macros without values), not compilable declarations from the original
 * header. They summarize the public API: create/find/free/delete-all for
 * LPM objects, add/delete/is_rule_present for rules, and the scalar,
 * bulk, and x4 lookup entry points. Verify against the full header
 * before treating them as source. */
void rte_lpm_free(struct rte_lpm *lpm)
struct rte_lpm * rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config)
void rte_lpm_delete_all(struct rte_lpm *lpm)
#define RTE_LPM_MAX_DEPTH
static int rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
static void rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], uint32_t defv)
int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
#define RTE_LPM_LOOKUP_SUCCESS
#define __rte_cache_aligned
int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop)
int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop)
struct rte_lpm * rte_lpm_find_existing(const char *name)