43 #include <sys/queue.h>
51 #include <rte_compat.h>
/** Max number of characters in an LPM object name. */
#define RTE_LPM_NAMESIZE 32

/** Maximum depth value possible for an IPv4 LPM rule (a /32 prefix). */
#define RTE_LPM_MAX_DEPTH 32

/** tbl24 is indexed by the top 24 bits of the IPv4 address: 2^24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)

/** Each tbl8 group covers the low 8 bits of the address: 256 entries. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256

/** Upper bound on the number of tbl8 groups that can be allocated. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)

/** Default number of tbl8 groups (used by the fixed-size v2.0 layout). */
#define RTE_LPM_TBL8_NUM_GROUPS 256

/** Total tbl8 entries = groups * entries-per-group. */
#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
		RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
/* Argument-checking helper: in debug builds it returns `retval` from the
 * enclosing function when `cond` holds; in release builds it compiles away.
 * NOTE(review): the closing "} while (0)" and the "#else"/"#endif" of this
 * conditional appear to have been lost in extraction — verify against the
 * full header. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)

/* Mask selecting both flag bits (valid + valid_group) of a 32-bit table
 * entry; when both are set in a tbl24 entry, the entry points to a tbl8
 * group rather than holding a final next hop. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/* Bit set in a lookup result to signal a successful match. */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
/* Table-entry layouts are duplicated per byte order so that the flag bits
 * occupy the same physical bit positions on both endiannesses.
 * NOTE(review): most struct members (valid, ext_entry/group_idx, depth,
 * next_hop for some variants) and the closing braces were lost in
 * extraction — verify against the full header. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/** tbl24/tbl8 entry, v2.0 ABI layout (8-bit next hop). */
struct rte_lpm_tbl_entry_v20 {
	/* For a tbl24 entry: set when the entry points to a tbl8 group.
	 * For a tbl8 entry: set when the group is in use. */
	uint8_t valid_group :1;

/** Current tbl24/tbl8 entry: 24-bit next hop (or tbl8 group index). */
struct rte_lpm_tbl_entry {
	uint32_t next_hop :24;
	uint32_t valid_group :1;

/* Big-endian variants: same fields, bitfield order reversed. */
struct rte_lpm_tbl_entry_v20 {
	uint8_t valid_group :1;

struct rte_lpm_tbl_entry {
	uint32_t valid_group :1;
	uint32_t next_hop :24;
/* Rule and table structures.
 * NOTE(review): member lists and closing braces are heavily elided in this
 * extraction — verify each struct against the full header. */

/** LPM rule, v2.0 ABI (8-bit next hop). */
struct rte_lpm_rule_v20 {

/** LPM rule, current ABI (24-bit next hop). */
struct rte_lpm_rule {

/** Per-depth bookkeeping for the rules table. */
struct rte_lpm_rule_info {

/* v2.0 LPM object: tbl8 is statically sized and the rules table is a
 * flexible array member at the end of the structure. */
	struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
	struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
	struct rte_lpm_rule_v20 rules_tbl[]

/* Current LPM object: tbl8 group count is configurable, so tbl8 and the
 * rules table are separately allocated and referenced by pointer. */
	uint32_t number_tbl8s;
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
	struct rte_lpm_tbl_entry *tbl8;
	struct rte_lpm_rule *rules_tbl;
/* Versioned API prototypes (ABI compatibility: _v20 = DPDK 2.0 layout,
 * _v1604 = DPDK 16.04 layout).
 * NOTE(review): return types and some trailing parameters of these
 * prototypes were lost in extraction — verify against the full header. */

/** Create an LPM object (v2.0 ABI). */
rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);

/** Create an LPM object (v16.04 ABI). */
rte_lpm_create_v1604(const char *name, int socket_id,

/** Look up an existing LPM object by name (v2.0 ABI). */
rte_lpm_find_existing_v20(const char *name);

/** Look up an existing LPM object by name (v16.04 ABI). */
rte_lpm_find_existing_v1604(const char *name);

/** Free an LPM object (v2.0 ABI). */
rte_lpm_free_v20(struct rte_lpm_v20 *lpm);

/** Free an LPM object (v16.04 ABI). */
rte_lpm_free_v1604(struct rte_lpm *lpm);

/** Add a rule mapping ip/depth to next_hop. */
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);

rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,

rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

/** Check whether a rule for ip/depth is present (v2.0 ABI). */
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,

/** Check whether a rule for ip/depth is present (v16.04 ABI). */
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

/** Delete the rule for ip/depth (v2.0 ABI). */
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);

/** Delete the rule for ip/depth (v16.04 ABI). */
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/** Remove all rules (v2.0 ABI). */
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);

/** Remove all rules (v16.04 ABI). */
rte_lpm_delete_all_v1604(struct rte_lpm *lpm);
/* Body fragment of the inline single-address lookup (rte_lpm_lookup).
 * NOTE(review): the function signature, the declaration of `tbl_entry`,
 * the initial tbl24 read into it, and the closing return were lost in
 * extraction — verify against the full header. */

	/* Top 24 bits of the IPv4 address index tbl24 directly. */
	unsigned tbl24_index = (ip >> 8);
	const uint32_t *ptbl;

	/* Debug-build argument check (compiles away in release builds). */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Read the whole 32-bit entry at once through a uint32_t view of
	 * the bitfield struct. */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);

	/* Both flag bits set => the tbl24 entry points to a tbl8 group;
	 * the low 8 bits of the IP select the entry within that group. */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
			(((uint32_t)tbl_entry & 0x00FFFFFF) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];

	/* The low 24 bits of the final entry hold the next hop. */
	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
/* Public bulk-lookup name; forwards all arguments to the inline
 * implementation below. */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
	rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
/* Bulk variant of rte_lpm_lookup(): resolves `n` IPv4 addresses from
 * `ips[]` into `next_hops[]`.
 * NOTE(review): the return type, opening brace, the declaration of the
 * loop variable `i`, loop closing braces and the final return were lost
 * in extraction — verify against the full header. */
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)

	/* VLA of precomputed tbl24 indexes so the table reads below can be
	 * issued back-to-back. */
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* Debug-build argument check (compiles away in release builds). */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	/* First pass: top 24 bits of each address index tbl24. */
	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;

	/* Second pass: read each entry, extending into tbl8 when the
	 * tbl24 entry is an extended (group) entry. */
	for (i = 0; i < n; i++) {
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Both flag bits set => low 8 bits of the IP select the
		 * entry within the referenced tbl8 group. */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
				(((uint32_t)next_hops[i] & 0x00FFFFFF) *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
/* Mask used by the vectorized x4 lookup to strip the flag bits from four
 * packed 32-bit results (keeping the two 24-bit next hops per lane pair). */
#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)

/* Pull in the per-architecture SIMD implementation of the x4 lookup.
 * NOTE(review): the "#else" before the SSE include and the closing
 * "#endif" appear to have been lost in extraction — verify against the
 * full header. */
#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#include "rte_lpm_sse.h"