#include <sys/queue.h>
#include <rte_compat.h>
#define RTE_LPM_NAMESIZE               32         /**< Max number of characters in LPM name. */
#define RTE_LPM_MAX_DEPTH              32         /**< Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_TBL24_NUM_ENTRIES      (1 << 24)  /**< @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256        /**< @internal Number of entries in a tbl8 group. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS    (1 << 24)  /**< @internal Max number of tbl8 groups. */
#define RTE_LPM_TBL8_NUM_GROUPS        256        /**< @internal Total number of tbl8 groups. */
#define RTE_LPM_TBL8_NUM_ENTRIES       (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES) /**< @internal Total tbl8 entries. */
/** @internal Macro to enable/disable run-time argument checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000 /**< @internal valid and valid_group bits set. */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000 /**< Bitmask used to indicate successful lookup. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24/tbl8 entry structure (DPDK 2.0 ABI). */
struct rte_lpm_tbl_entry_v20 {
	union {
		uint8_t next_hop;  /**< Next hop. */
		uint8_t group_idx; /**< tbl8 group index (tbl24 only, when valid_group is set). */
	};
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t valid_group :1; /**< tbl24: entry points to a tbl8; tbl8: group is in use. */
	uint8_t depth       :6; /**< Rule depth. */
} __rte_aligned(sizeof(uint16_t));

/** @internal Tbl24/tbl8 entry structure. */
struct rte_lpm_tbl_entry {
	uint32_t next_hop    :24; /**< Next hop, or tbl8 group index when valid_group is set. */
	uint32_t valid       :1;  /**< Validation flag. */
	uint32_t valid_group :1;  /**< tbl24: entry points to a tbl8; tbl8: group is in use. */
	uint32_t depth       :6;  /**< Rule depth. */
};
#else
/* Big-endian layout: the same fields in reverse order. */
struct rte_lpm_tbl_entry_v20 {
	uint8_t depth       :6;
	uint8_t valid_group :1;
	uint8_t valid       :1;
	union {
		uint8_t group_idx;
		uint8_t next_hop;
	};
} __rte_aligned(sizeof(uint16_t));

struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};
#endif
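/*
 * Worked index example (illustrative sketch using only the fields above): for
 * a host-order IPv4 address, the top 24 bits select the tbl24 entry; when that
 * entry has both valid and valid_group set, its 24-bit field is a tbl8 group
 * index and the last address byte selects the entry inside that group:
 *
 *	tbl24_index = ip >> 8;
 *	tbl8_index  = group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES + (uint8_t)ip;
 *
 * e.g. 192.168.1.5 == 0xC0A80105 gives tbl24_index 0xC0A801 and offset 0x05
 * within the selected tbl8 group.
 */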
/** @internal Rule structure (DPDK 2.0 ABI). */
struct rte_lpm_rule_v20 {
	uint32_t ip;       /**< Rule IP address. */
	uint8_t  next_hop; /**< Rule next hop. */
};

/** @internal Rule structure. */
struct rte_lpm_rule {
	uint32_t ip;       /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
};

/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
/** @internal LPM structure (DPDK 2.0 ABI). */
struct rte_lpm_v20 {
	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules;          /**< Max. balanced rules per lpm. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
	struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl8 table. */
	struct rte_lpm_rule_v20 rules_tbl[0]
			__rte_cache_aligned; /**< LPM rules. */
};

/** @internal LPM structure. */
struct rte_lpm {
	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules;          /**< Max. balanced rules per lpm. */
	uint32_t number_tbl8s;       /**< Number of tbl8s. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned;    /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8;     /**< LPM tbl8 table. */
	struct rte_lpm_rule *rules_tbl;     /**< LPM rules. */
};
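/*
 * Sizing sketch (illustrative; assumes the 4-byte rte_lpm_tbl_entry and the
 * 2-byte rte_lpm_tbl_entry_v20 layouts above): tbl24 always holds 2^24
 * entries, so it occupies roughly 64 MiB per rte_lpm table and roughly 32 MiB
 * in the v20 layout; the fixed v20 tbl8 adds 256 * 256 * 2 bytes = 128 KiB,
 * while the current tbl8 is allocated separately using number_tbl8s groups.
 */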
/** Create an LPM object (ABI-versioned variants). */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config);
/** Find an existing LPM object and return a pointer to it. */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name);
struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name);
/** Free an LPM object. */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm);
void
rte_lpm_free_v1604(struct rte_lpm *lpm);
/** Add a rule to the LPM table: 0 on success, negative value otherwise. */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop);
int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop);
/** Check if a rule is present in the LPM table and provide its next hop if it is. */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop);
int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);
/** Delete a rule from the LPM table. */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
/** Delete all rules from the LPM table. */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);
void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm);
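/*
 * Usage sketch (illustrative only; the table name and sizes are arbitrary,
 * the unversioned wrappers are assumed to resolve to the latest ABI, and
 * rte_lpm_config is the configuration structure declared in the full header):
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024, .number_tbl8s = 256, .flags = 0 };
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
 *	uint32_t net = ((uint32_t)192 << 24) | (168 << 16) | (1 << 8); // 192.168.1.0
 *	uint32_t next_hop = 0;
 *
 *	if (lpm != NULL && rte_lpm_add(lpm, net, 24, 7) == 0 &&
 *			rte_lpm_lookup(lpm, net | 5, &next_hop) == 0) {
 *		// next_hop is now 7 for any address inside 192.168.1.0/24
 *	}
 *	rte_lpm_delete(lpm, net, 24);
 *	rte_lpm_free(lpm);
 */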
/** Lookup an IP in the LPM table: 0 on hit, -ENOENT on miss, -EINVAL on bad arguments. */
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
/** Lookup multiple IP addresses in an LPM table. */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
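/*
 * Bulk-result handling sketch (illustrative; BURST, ips[], pkts[] and the
 * helpers are hypothetical): unlike rte_lpm_lookup(), the bulk variant stores
 * the raw table entry in next_hops[], so the caller tests
 * RTE_LPM_LOOKUP_SUCCESS and masks the low 24 bits itself:
 *
 *	uint32_t hops[BURST];
 *	rte_lpm_lookup_bulk(lpm, ips, hops, BURST);
 *	for (i = 0; i < BURST; i++) {
 *		if (hops[i] & RTE_LPM_LOOKUP_SUCCESS)
 *			forward(pkts[i], hops[i] & 0x00FFFFFF); // hypothetical helper
 *		else
 *			drop(pkts[i]);                          // hypothetical helper
 *	}
 */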
/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
#include "rte_lpm_neon.h"
#else
#include "rte_lpm_sse.h"
#endif