/*
 * DPDK 20.08.0 — rte_lpm.h
 * (Documentation-site banner: "Go to the documentation of this file."
 *  This text is Doxygen page chrome, not part of the original header.)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  * Copyright(c) 2020 Arm Limited
4  */
5 
6 #ifndef _RTE_LPM_H_
7 #define _RTE_LPM_H_
8 
14 #include <errno.h>
15 #include <sys/queue.h>
16 #include <stdint.h>
17 #include <stdlib.h>
18 #include <rte_branch_prediction.h>
19 #include <rte_byteorder.h>
20 #include <rte_config.h>
21 #include <rte_memory.h>
22 #include <rte_common.h>
23 #include <rte_vect.h>
24 #include <rte_rcu_qsbr.h>
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries (one per possible /24 prefix). */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group (one per low byte value). */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time argument checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with the valid and valid_group fields of an entry set. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/** @internal Default number of RCU defer-queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16
69 
/** RCU reclamation modes.
 * NOTE(review): the enum body was lost in the doc-site extraction (only the
 * closing brace survived); reconstructed from DPDK 20.08 upstream — the
 * '0' == defer-queue default is corroborated by the rte_lpm_rcu_config
 * comment below. Confirm against the DPDK tree before merging.
 */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim. */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue created. */
	RTE_LPM_QSBR_MODE_SYNC
};
77 
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/** @internal Tbl24/tbl8 entry structure (little-endian bit order). */
__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores the next hop, or — in tbl24 only, when valid_group is
	 * set — a group index pointing into the tbl8 table.
	 */
	uint32_t next_hop    :24;
	/* Using single uint8_t to store 3 values. */
	uint32_t valid       :1;   /**< Validation flag. */
	/**
	 * In tbl24: set when next_hop holds a tbl8 group index rather
	 * than an actual next hop. In tbl8: marks the group as in use.
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6;   /**< Rule depth (length of matched prefix). */
};

#else

/** @internal Same entry with fields reversed for big-endian bit order. */
__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};

#endif
113 
/** LPM configuration structure, passed to rte_lpm_create().
 * NOTE(review): the opening line of this struct was dropped by the doc-site
 * extraction; restored from the rte_lpm_create() prototype which takes a
 * `const struct rte_lpm_config *`.
 */
struct rte_lpm_config {
	uint32_t max_rules;      /**< Max number of rules. */
	uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
	int flags;               /**< Currently unused. */
};
120 
/** @internal Rule structure. */
struct rte_lpm_rule {
	uint32_t ip;       /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
};
126 
/** @internal Contains metadata about the rules table (one slot per depth). */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Number of used rules at this depth. */
	uint32_t first_rule; /**< Index of the first rule at this depth. */
};
132 
134 struct rte_lpm {
135  /* LPM metadata. */
136  char name[RTE_LPM_NAMESIZE];
137  uint32_t max_rules;
138  uint32_t number_tbl8s;
139  struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
141  /* LPM Tables. */
142  struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
144  struct rte_lpm_tbl_entry *tbl8;
145  struct rte_lpm_rule *rules_tbl;
146 };
147 
150  struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
151  /* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
152  * '0' for default: create defer queue for reclaim.
153  */
154  enum rte_lpm_qsbr_mode mode;
155  uint32_t dq_size; /* RCU defer queue size.
156  * default: lpm->number_tbl8s.
157  */
158  uint32_t reclaim_thd; /* Threshold to trigger auto reclaim. */
159  uint32_t reclaim_max; /* Max entries to reclaim in one go.
160  * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
161  */
162 };
163 
183 struct rte_lpm *
184 rte_lpm_create(const char *name, int socket_id,
185  const struct rte_lpm_config *config);
186 
197 struct rte_lpm *
198 rte_lpm_find_existing(const char *name);
199 
208 void
209 rte_lpm_free(struct rte_lpm *lpm);
210 
229 __rte_experimental
230 int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
231 
246 int
247 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
248 
264 int
265 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
266 uint32_t *next_hop);
267 
280 int
281 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
282 
289 void
290 rte_lpm_delete_all(struct rte_lpm *lpm);
291 
304 static inline int
305 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
306 {
307  unsigned tbl24_index = (ip >> 8);
308  uint32_t tbl_entry;
309  const uint32_t *ptbl;
310 
311  /* DEBUG: Check user input arguments. */
312  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
313 
314  /* Copy tbl24 entry */
315  ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
316  tbl_entry = *ptbl;
317 
318  /* Memory ordering is not required in lookup. Because dataflow
319  * dependency exists, compiler or HW won't be able to re-order
320  * the operations.
321  */
322  /* Copy tbl8 entry (only if needed) */
323  if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
324  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
325 
326  unsigned tbl8_index = (uint8_t)ip +
327  (((uint32_t)tbl_entry & 0x00FFFFFF) *
328  RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
329 
330  ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
331  tbl_entry = *ptbl;
332  }
333 
334  *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
335  return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
336 }
337 
358 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
359  rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
360 
361 static inline int
362 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
363  uint32_t *next_hops, const unsigned n)
364 {
365  unsigned i;
366  unsigned tbl24_indexes[n];
367  const uint32_t *ptbl;
368 
369  /* DEBUG: Check user input arguments. */
370  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
371  (next_hops == NULL)), -EINVAL);
372 
373  for (i = 0; i < n; i++) {
374  tbl24_indexes[i] = ips[i] >> 8;
375  }
376 
377  for (i = 0; i < n; i++) {
378  /* Simply copy tbl24 entry to output */
379  ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
380  next_hops[i] = *ptbl;
381 
382  /* Overwrite output with tbl8 entry if needed */
383  if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
384  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
385 
386  unsigned tbl8_index = (uint8_t)ips[i] +
387  (((uint32_t)next_hops[i] & 0x00FFFFFF) *
388  RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
389 
390  ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
391  next_hops[i] = *ptbl;
392  }
393  }
394  return 0;
395 }
396 
/* Mask four results: keeps the 24-bit next-hop field of each 32-bit lane. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
399 
419 static inline void
420 rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
421  uint32_t defv);
422 
423 #if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
424 #include "rte_lpm_neon.h"
425 #elif defined(RTE_ARCH_PPC_64)
426 #include "rte_lpm_altivec.h"
427 #else
428 #include "rte_lpm_sse.h"
429 #endif
430 
431 #ifdef __cplusplus
432 }
433 #endif
434 
435 #endif /* _RTE_LPM_H_ */
/*
 * NOTE(review): The text below is Doxygen hover-tooltip residue appended by
 * the documentation-site scrape; it duplicates declarations above and is not
 * part of the original rte_lpm.h. Kept commented out so the file stays valid:
 *
 *   void rte_lpm_free(struct rte_lpm *lpm)
 *   __rte_experimental int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
 *   struct rte_lpm * rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config)
 *   void rte_lpm_delete_all(struct rte_lpm *lpm)
 *   rte_lpm_qsbr_mode                  — Definition: rte_lpm.h:71
 *   uint32_t max_rules                 — Definition: rte_lpm.h:116
 *   #define RTE_LPM_MAX_DEPTH          — Definition: rte_lpm.h:34
 *   static int rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 *                                      — Definition: rte_lpm.h:305
 *   #define unlikely(x)
 *   #define RTE_LPM_NAMESIZE           — Definition: rte_lpm.h:31
 *   static void rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], uint32_t defv)
 *   uint32_t number_tbl8s              — Definition: rte_lpm.h:117
 *   int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 *   #define __rte_cache_aligned        — Definition: rte_common.h:376
 *   #define RTE_LPM_LOOKUP_SUCCESS     — Definition: rte_lpm.h:65
 *   int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop)
 *   int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop)
 *   struct rte_lpm * rte_lpm_find_existing(const char *name)
 */