/*
 * DPDK 21.11.9 — rte_lpm.h
 * (Text extracted from the Doxygen "documentation of this file" page.)
 */
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
4 */
5
6#ifndef _RTE_LPM_H_
7#define _RTE_LPM_H_
8
14#include <errno.h>
15#include <sys/queue.h>
16#include <stdint.h>
17#include <stdlib.h>
19#include <rte_byteorder.h>
20#include <rte_config.h>
21#include <rte_memory.h>
22#include <rte_common.h>
23#include <rte_vect.h>
24#include <rte_rcu_qsbr.h>
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
31#define RTE_LPM_NAMESIZE 32
32
34#define RTE_LPM_MAX_DEPTH 32
35
37#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
38
40#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
41
43#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
44
46#define RTE_LPM_TBL8_NUM_GROUPS 256
47
49#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
50 RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
51
#if defined(RTE_LIBRTE_LPM_DEBUG)
/** Debug builds only: return @p retval from the enclosing function when
 * @p cond is true. Expands to nothing in non-debug builds, so argument
 * checks guarded by it are free in production.
 */
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
60
62#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
63
65#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
66
68#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
69
/** RCU reclamation modes (restored: the enum header line was dropped by the
 * documentation extraction; enumerators are listed in the page's index).
 */
enum rte_lpm_qsbr_mode {
	/** Create defer queue for reclaim. */
	RTE_LPM_QSBR_MODE_DQ = 0,
	/** Use blocking mode reclaim. No defer queue is created. */
	RTE_LPM_QSBR_MODE_SYNC
};
77
78#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24/tbl8 entry structure, little-endian bit layout.
 * All four fields pack into a single 32-bit word.
 */
__extension__
struct rte_lpm_tbl_entry {
	/**
	 * Stores the next hop (for a terminal tbl24 entry or any tbl8
	 * entry) or, when valid_group is set in a tbl24 entry, the index
	 * of the tbl8 group to continue the lookup in.
	 */
	uint32_t next_hop    :24;
	/* Using single uint8_t to store 3 values. */
	uint32_t valid       :1; /**< Validation flag. */
	/**
	 * For tbl24:
	 *  - valid_group == 0: entry stores a next hop
	 *  - valid_group == 1: entry stores a group index pointing to a tbl8
	 * For tbl8:
	 *  - valid_group indicates whether the current tbl8 is in use.
	 */
	uint32_t valid_group :1;
	uint32_t depth       :6; /**< Rule depth (prefix length). */
};
100
101#else
102
/** @internal Tbl24/tbl8 entry structure, big-endian bit layout.
 * Mirror image of the little-endian variant so the in-memory word layout
 * is identical on both endiannesses.
 */
__extension__
struct rte_lpm_tbl_entry {
	uint32_t depth       :6; /**< Rule depth (prefix length). */
	uint32_t valid_group :1; /**< Group validity / extended-entry flag. */
	uint32_t valid       :1; /**< Validation flag. */
	uint32_t next_hop    :24; /**< Next hop or tbl8 group index. */
};
111
112#endif
113
/** LPM configuration structure (restored: the struct header line was
 * dropped by the documentation extraction).
 */
struct rte_lpm_config {
	uint32_t max_rules;    /**< Max number of rules. */
	uint32_t number_tbl8s; /**< Number of tbl8 groups to allocate. */
	int flags;             /**< This field is currently unused. */
};
120
122struct rte_lpm {
123 /* LPM Tables. */
124 struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
126 struct rte_lpm_tbl_entry *tbl8;
127};
128
131 struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
132 /* Mode of RCU QSBR. RTE_LPM_QSBR_MODE_xxx
133 * '0' for default: create defer queue for reclaim.
134 */
135 enum rte_lpm_qsbr_mode mode;
136 uint32_t dq_size; /* RCU defer queue size.
137 * default: lpm->number_tbl8s.
138 */
139 uint32_t reclaim_thd; /* Threshold to trigger auto reclaim. */
140 uint32_t reclaim_max; /* Max entries to reclaim in one go.
141 * default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
142 */
143};
144
164struct rte_lpm *
165rte_lpm_create(const char *name, int socket_id,
166 const struct rte_lpm_config *config);
167
178struct rte_lpm *
179rte_lpm_find_existing(const char *name);
180
187void
188rte_lpm_free(struct rte_lpm *lpm);
189
208__rte_experimental
209int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
210
225int
226rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
227
243int
244rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
245uint32_t *next_hop);
246
259int
260rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
261
268void
269rte_lpm_delete_all(struct rte_lpm *lpm);
270
283static inline int
284rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
285{
286 unsigned tbl24_index = (ip >> 8);
287 uint32_t tbl_entry;
288 const uint32_t *ptbl;
289
290 /* DEBUG: Check user input arguments. */
291 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
292
293 /* Copy tbl24 entry */
294 ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
295 tbl_entry = *ptbl;
296
297 /* Memory ordering is not required in lookup. Because dataflow
298 * dependency exists, compiler or HW won't be able to re-order
299 * the operations.
300 */
301 /* Copy tbl8 entry (only if needed) */
302 if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
303 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
304
305 unsigned tbl8_index = (uint8_t)ip +
306 (((uint32_t)tbl_entry & 0x00FFFFFF) *
307 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
308
309 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
310 tbl_entry = *ptbl;
311 }
312
313 *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
314 return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
315}
316
337#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
338 rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
339
340static inline int
341rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
342 uint32_t *next_hops, const unsigned n)
343{
344 unsigned i;
345 unsigned tbl24_indexes[n];
346 const uint32_t *ptbl;
347
348 /* DEBUG: Check user input arguments. */
349 RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
350 (next_hops == NULL)), -EINVAL);
351
352 for (i = 0; i < n; i++) {
353 tbl24_indexes[i] = ips[i] >> 8;
354 }
355
356 for (i = 0; i < n; i++) {
357 /* Simply copy tbl24 entry to output */
358 ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
359 next_hops[i] = *ptbl;
360
361 /* Overwrite output with tbl8 entry if needed */
362 if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
363 RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
364
365 unsigned tbl8_index = (uint8_t)ips[i] +
366 (((uint32_t)next_hops[i] & 0x00FFFFFF) *
367 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
368
369 ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
370 next_hops[i] = *ptbl;
371 }
372 }
373 return 0;
374}
375
376/* Mask four results. */
377#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
378
398static inline void
399rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
400 uint32_t defv);
401
402#if defined(RTE_ARCH_ARM)
403#ifdef RTE_HAS_SVE_ACLE
404#include "rte_lpm_sve.h"
405#else
406#include "rte_lpm_neon.h"
407#endif
408#elif defined(RTE_ARCH_PPC_64)
409#include "rte_lpm_altivec.h"
410#else
411#include "rte_lpm_sse.h"
412#endif
413
414#ifdef __cplusplus
415}
416#endif
417
418#endif /* _RTE_LPM_H_ */
/*
 * Doxygen cross-reference residue from the extracted documentation page.
 * These lines are not part of the original rte_lpm.h (note they fall after
 * the include guard's #endif) and are preserved here only for reference:
 *
 * #define unlikely(x)
 * #define __rte_cache_aligned
 * Definition: rte_common.h:420
 * int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop)
 * int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 * static int rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
 * Definition: rte_lpm.h:284
 * __rte_experimental int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
 * void rte_lpm_free(struct rte_lpm *lpm)
 * static void rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], uint32_t defv)
 * #define RTE_LPM_LOOKUP_SUCCESS
 * Definition: rte_lpm.h:65
 * rte_lpm_qsbr_mode
 * Definition: rte_lpm.h:71
 * @ RTE_LPM_QSBR_MODE_DQ
 * Definition: rte_lpm.h:73
 * @ RTE_LPM_QSBR_MODE_SYNC
 * Definition: rte_lpm.h:75
 * struct rte_lpm * rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config)
 * void rte_lpm_delete_all(struct rte_lpm *lpm)
 * int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop)
 * struct rte_lpm * rte_lpm_find_existing(const char *name)
 * uint32_t number_tbl8s
 * Definition: rte_lpm.h:117
 * uint32_t max_rules
 * Definition: rte_lpm.h:116
 */