DPDK 19.08.2
rte_lpm.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_LPM_H_
#define _RTE_LPM_H_

/**
 * @file
 * RTE Longest Prefix Match (LPM)
 */

#include <errno.h>
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
#include <rte_compat.h>

#ifdef __cplusplus
extern "C" {
#endif

/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE 32

/** Maximum depth value possible for an IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH 32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS 256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
		RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with the valid and valid_group fields set. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/** @internal Tbl24/tbl8 entry structure (v20). */
__extension__
struct rte_lpm_tbl_entry_v20 {
	/**
	 * Stores the next hop, or a group index pointing to a tbl8
	 * structure (tbl24 only, when valid_group is set).
	 */
	union {
		uint8_t next_hop;
		uint8_t group_idx;
	};
	/* Using single uint8_t to store 3 values. */
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t valid_group :1; /**< tbl24: tbl8 extension flag; tbl8: group in use. */
	uint8_t depth       :6; /**< Rule depth or length. */
} __rte_aligned(sizeof(uint16_t));

__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores the next hop, or a group index pointing to a tbl8
	 * structure (tbl24 only, when valid_group is set).
	 */
	uint32_t next_hop    :24;
	/* Using single uint32_t to store 3 values. */
	uint32_t valid       :1; /**< Validation flag. */
	uint32_t valid_group :1; /**< tbl24: tbl8 extension flag; tbl8: group in use. */
	uint32_t depth       :6; /**< Rule depth or length. */
};

#else

__extension__
struct rte_lpm_tbl_entry_v20 {
	uint8_t depth       :6;
	uint8_t valid_group :1;
	uint8_t valid       :1;
	union {
		uint8_t group_idx;
		uint8_t next_hop;
	};
} __rte_aligned(sizeof(uint16_t));

__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};

#endif

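/*
 * Illustration (not part of the original header): on little-endian targets
 * a struct rte_lpm_tbl_entry occupies one 32-bit word with next_hop in
 * bits 0-23, valid in bit 24, valid_group in bit 25 and depth in bits
 * 26-31. That is why the lookup code below can test the raw word against
 * RTE_LPM_LOOKUP_SUCCESS (0x01000000) and RTE_LPM_VALID_EXT_ENTRY_BITMASK
 * (0x03000000) and mask the next hop with 0x00FFFFFF. The helper name
 * below is hypothetical and only demonstrates that bit layout.
 */
static inline void
lpm_example_decode_entry(uint32_t raw_entry, uint32_t *next_hop,
		int *is_valid, int *is_tbl8_group, uint32_t *depth)
{
	*next_hop      = raw_entry & 0x00FFFFFF;                 /* bits 0-23 */
	*is_valid      = !!(raw_entry & RTE_LPM_LOOKUP_SUCCESS);  /* bit 24 */
	*is_tbl8_group = ((raw_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK);         /* bits 24 and 25 set */
	*depth         = raw_entry >> 26;                         /* bits 26-31 */
}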
/** LPM configuration structure. */
struct rte_lpm_config {
	uint32_t max_rules;      /**< Max number of rules. */
	uint32_t number_tbl8s;   /**< Number of tbl8s to allocate. */
	int flags;               /**< This field is currently unused. */
};

/** @internal Rule structure. */
struct rte_lpm_rule_v20 {
	uint32_t ip;        /**< Rule IP address. */
	uint8_t  next_hop;  /**< Rule next hop. */
};

struct rte_lpm_rule {
	uint32_t ip;        /**< Rule IP address. */
	uint32_t next_hop;  /**< Rule next hop. */
};

/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};

/** @internal LPM structure. */
struct rte_lpm_v20 {
	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
	uint32_t max_rules;                 /**< Max. balanced rules per lpm. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */

	/* LPM Tables. */
	struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl8 table. */
	struct rte_lpm_rule_v20 rules_tbl[]
			__rte_cache_aligned; /**< LPM rules. */
};

struct rte_lpm {
	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
	uint32_t max_rules;                 /**< Max. balanced rules per lpm. */
	uint32_t number_tbl8s;              /**< Number of tbl8s. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */

	/* LPM Tables. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8;     /**< LPM tbl8 table. */
	struct rte_lpm_rule *rules_tbl;     /**< LPM rules. */
};

/**
 * Create an LPM object. Returns a handle on success, NULL otherwise.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config);
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
		const struct rte_lpm_config *config);

/**
 * Find an existing LPM object by name and return a pointer to it,
 * or NULL if it is not found.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name);
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name);
struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name);

/** Free an LPM object. */
void
rte_lpm_free(struct rte_lpm *lpm);
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm);
void
rte_lpm_free_v1604(struct rte_lpm *lpm);

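/*
 * Usage sketch (not part of the original header): building an IPv4 LPM
 * table at init time. The table name, rule/tbl8 counts and the use of
 * SOCKET_ID_ANY are illustrative choices, not requirements of the API.
 */
static inline struct rte_lpm *
lpm_example_create(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,   /* routes we expect to install */
		.number_tbl8s = 256, /* tbl8 groups for prefixes longer than /24 */
		.flags = 0,          /* currently unused */
	};
	struct rte_lpm *lpm;

	/* NUMA-aware allocation is possible by passing a specific socket id. */
	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		return NULL;

	/* A secondary process could attach to the same table by name with
	 * rte_lpm_find_existing("example_lpm"); release the table with
	 * rte_lpm_free(lpm) when it is no longer needed.
	 */
	return lpm;
}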
/**
 * Add a rule to the LPM table. Returns 0 on success, negative value
 * otherwise.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop);
int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop);

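/*
 * Usage sketch (not part of the original header): the depth argument is
 * the prefix length of the route, so 10.0.0.0/8 and 10.1.1.0/24 can map
 * to different next hops, and the /24 wins for the addresses it covers.
 * The addresses and next-hop IDs below are made up for illustration;
 * addresses are passed in host byte order, as in the lookup code below.
 */
static inline int
lpm_example_install_routes(struct rte_lpm *lpm)
{
	int ret;

	/* 10.0.0.0/8 -> next hop 1 */
	ret = rte_lpm_add(lpm, ((uint32_t)10 << 24), 8, 1);
	if (ret < 0)
		return ret;

	/* 10.1.1.0/24 -> next hop 2 (more specific, preferred by lookup) */
	ret = rte_lpm_add(lpm, ((uint32_t)10 << 24) | (1 << 16) | (1 << 8), 24, 2);
	if (ret < 0)
		return ret;

	return 0;
}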
/**
 * Check if a rule is present in the LPM table and provide its next hop
 * if it is. Returns 1 if the rule exists, 0 if it does not, negative
 * value on failure.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop);
int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);

/**
 * Delete a rule from the LPM table. Returns 0 on success, negative value
 * otherwise.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/** Delete all rules from the LPM table. */
void
rte_lpm_delete_all(struct rte_lpm *lpm);
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);
void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm);

/**
 * Lookup an IP in the LPM table. Returns 0 on lookup hit, -EINVAL for
 * incorrect arguments, -ENOENT on lookup miss.
 */
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Memory ordering is not required in lookup: a dataflow dependency
	 * exists, so neither the compiler nor the hardware can reorder
	 * the operations.
	 */
	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}

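/*
 * Usage sketch (not part of the original header): data-path lookup of a
 * single address. With the routes from the sketch above, an address in
 * 10.1.1.0/24 resolves to next hop 2; the address and fallback value
 * below are only example values.
 */
static inline uint32_t
lpm_example_route_one(struct rte_lpm *lpm, uint32_t default_hop)
{
	uint32_t dst_ip = ((uint32_t)10 << 24) | (1 << 16) | (1 << 8) | 7; /* 10.1.1.7 */
	uint32_t next_hop;

	/* rte_lpm_lookup() returns 0 on hit and -ENOENT on miss. */
	if (rte_lpm_lookup(lpm, dst_ip, &next_hop) == 0)
		return next_hop;

	return default_hop; /* fall back when no prefix matches */
}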
/**
 * Lookup multiple IP addresses in an LPM table. On return, each element
 * of next_hops holds the raw table word: a hit is indicated by
 * RTE_LPM_LOOKUP_SUCCESS and the next hop is in the low 24 bits.
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}

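/*
 * Usage sketch (not part of the original header): bulk lookup of a small
 * burst of addresses. Unlike rte_lpm_lookup(), the bulk variant returns
 * the raw table words, so each result is tested against
 * RTE_LPM_LOOKUP_SUCCESS and masked to extract the next hop. The burst
 * size and the "no route" marker are illustrative.
 */
static inline void
lpm_example_route_burst(const struct rte_lpm *lpm, const uint32_t ips[4],
		uint32_t hops[4])
{
	uint32_t raw[4];
	unsigned int i;

	rte_lpm_lookup_bulk(lpm, ips, raw, 4);

	for (i = 0; i < 4; i++) {
		if (raw[i] & RTE_LPM_LOOKUP_SUCCESS)
			hops[i] = raw[i] & 0x00FFFFFF; /* low 24 bits = next hop */
		else
			hops[i] = UINT32_MAX;          /* illustrative "no route" marker */
	}
}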
/* Mask four results. */
#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)

/**
 * Lookup four IP addresses in an LPM table in one call. Misses are
 * filled with the default value defv. Implemented per architecture by
 * the headers included below.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
		uint32_t defv);

#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#else
#include "rte_lpm_sse.h"
#endif

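/*
 * Usage sketch (not part of the original header): vector lookup of four
 * addresses at once. rte_xmm_t (from rte_vect.h) is assumed here as a
 * portable way to build the xmm_t argument from four 32-bit addresses;
 * the default-hop value is illustrative.
 */
static inline void
lpm_example_route_x4(const struct rte_lpm *lpm, const uint32_t ips[4],
		uint32_t hops[4])
{
	rte_xmm_t vec;

	vec.u32[0] = ips[0];
	vec.u32[1] = ips[1];
	vec.u32[2] = ips[2];
	vec.u32[3] = ips[3];

	/* Misses are filled with the default value (last argument). */
	rte_lpm_lookupx4(lpm, vec.x, hops, UINT32_MAX);
}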
#ifdef __cplusplus
}
#endif

#endif /* _RTE_LPM_H_ */