/*
 * rte_lpm.h — DPDK 2.1.0 Longest Prefix Match (LPM) public API for IPv4.
 * (Recovered from the Doxygen rendering of this file.)
 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 
34 #ifndef _RTE_LPM_H_
35 #define _RTE_LPM_H_
36 
/* Standard C headers first, then DPDK headers. */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_vect.h>
51 
52 #ifdef __cplusplus
53 extern "C" {
54 #endif
55 
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE 32

/** @deprecated Possible location to allocate memory. This was for the last
 *  parameter of rte_lpm_create(), but is now redundant. */
#define RTE_LPM_HEAP 0

/** @deprecated Possible location to allocate memory. This was for the last
 *  parameter of rte_lpm_create(), but is now redundant. */
#define RTE_LPM_MEMZONE 1

/** Maximum depth value possible for an IPv4 LPM rule (a /32 prefix). */
#define RTE_LPM_MAX_DEPTH 32

/** @internal Total number of tbl24 entries: one per possible /24 prefix. */
#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)

/** @internal Number of entries per tbl8 group: one per last-octet value. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256

/** @internal Number of tbl8 groups available for extended (>24-bit) rules. */
#define RTE_LPM_TBL8_NUM_GROUPS 256

/** @internal Total number of tbl8 entries across all groups. */
#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
		RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Run-time argument checks, compiled in only when
 *  RTE_LIBRTE_LPM_DEBUG is defined; a no-op otherwise. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with the valid and ext_entry/valid_group flag bits of a
 *  16-bit table entry both set (see the tbl entry structs below). */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300

/** Bitmask set in a 16-bit table entry to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS 0x0100
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/** @internal Tbl24 entry structure (little endian).
 *  Packed into 2 bytes so an entry can be copied as a single uint16_t whose
 *  high byte holds the flag bits tested against RTE_LPM_LOOKUP_SUCCESS and
 *  RTE_LPM_VALID_EXT_ENTRY_BITMASK. */
struct rte_lpm_tbl24_entry {
	/* Stores next hop or group index (i.e. gindex) into tbl8. */
	union {
		uint8_t next_hop;    /**< Next hop (when ext_entry == 0). */
		uint8_t tbl8_gindex; /**< tbl8 group index (when ext_entry == 1). */
	};
	/* Using single uint8_t to store 3 values. */
	uint8_t valid     :1; /**< Validation flag. */
	uint8_t ext_entry :1; /**< Entry continues into tbl8. */
	uint8_t depth     :6; /**< Rule depth. */
};

/** @internal Tbl8 entry structure (little endian). */
struct rte_lpm_tbl8_entry {
	uint8_t next_hop; /**< Next hop. */
	/* Using single uint8_t to store 3 values. */
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t valid_group :1; /**< Group validation flag. */
	uint8_t depth       :6; /**< Rule depth. */
};

#else

/** @internal Tbl24 entry structure (big endian). Field order is reversed so
 *  the uint16_t view of an entry has the same bit layout as on little endian. */
struct rte_lpm_tbl24_entry {
	uint8_t depth     :6; /**< Rule depth. */
	uint8_t ext_entry :1; /**< Entry continues into tbl8. */
	uint8_t valid     :1; /**< Validation flag. */
	union {
		uint8_t tbl8_gindex; /**< tbl8 group index (when ext_entry == 1). */
		uint8_t next_hop;    /**< Next hop (when ext_entry == 0). */
	};
};

/** @internal Tbl8 entry structure (big endian). */
struct rte_lpm_tbl8_entry {
	uint8_t depth       :6; /**< Rule depth. */
	uint8_t valid_group :1; /**< Group validation flag. */
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t next_hop;       /**< Next hop. */
};
#endif
140 
/** @internal Rule structure: one configured LPM rule. */
struct rte_lpm_rule {
	uint32_t ip;      /**< Rule IP address. */
	uint8_t next_hop; /**< Rule next hop. */
};

/** @internal Contains metadata about the rules table; one instance per depth
 *  (see rule_info[] in struct rte_lpm). */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
152 
154 struct rte_lpm {
155  /* LPM metadata. */
156  char name[RTE_LPM_NAMESIZE];
157  int mem_location;
158  uint32_t max_rules;
159  struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
161  /* LPM Tables. */
162  struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
164  struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
166  struct rte_lpm_rule rules_tbl[0] \
168 };
169 
/**
 * Create an LPM object.
 *
 * @param name
 *   LPM object name
 * @param socket_id
 *   NUMA socket ID for LPM table memory allocation
 * @param max_rules
 *   Maximum number of LPM rules that can be added
 * @param flags
 *   This parameter is currently unused
 * @return
 *   Handle to LPM object on success, NULL otherwise with rte_errno set
 *   to an appropriate value.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);

/**
 * Find an existing LPM object and return a pointer to it.
 *
 * @param name
 *   Name of the lpm object as passed to rte_lpm_create()
 * @return
 *   Pointer to lpm object or NULL if object not found, with rte_errno
 *   set appropriately.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name);

/**
 * Free an LPM object.
 *
 * @param lpm
 *   LPM object handle
 */
void
rte_lpm_free(struct rte_lpm *lpm);

/**
 * Add a rule to the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be added to the LPM table
 * @param depth
 *   Depth (prefix length) of the rule to be added to the LPM table
 * @param next_hop
 *   Next hop of the rule to be added to the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);

/**
 * Check if a rule is present in the LPM table, and provide its next hop if
 * it is.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be searched
 * @param depth
 *   Depth (prefix length) of the rule to be searched
 * @param next_hop
 *   Next hop of the rule (valid only if it is found)
 * @return
 *   1 if the rule exists, 0 if it does not, a negative value on failure
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint8_t *next_hop);

/**
 * Delete a rule from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 * @param ip
 *   IP of the rule to be deleted from the LPM table
 * @param depth
 *   Depth (prefix length) of the rule to be deleted from the LPM table
 * @return
 *   0 on success, negative value otherwise
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/**
 * Delete all rules from the LPM table.
 *
 * @param lpm
 *   LPM object handle
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm);
277 
290 static inline int
291 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
292 {
293  unsigned tbl24_index = (ip >> 8);
294  uint16_t tbl_entry;
295 
296  /* DEBUG: Check user input arguments. */
297  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
298 
299  /* Copy tbl24 entry */
300  tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
301 
302  /* Copy tbl8 entry (only if needed) */
303  if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
304  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
305 
306  unsigned tbl8_index = (uint8_t)ip +
307  ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
308 
309  tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
310  }
311 
312  *next_hop = (uint8_t)tbl_entry;
313  return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
314 }
315 
336 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
337  rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
338 
339 static inline int
340 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
341  uint16_t * next_hops, const unsigned n)
342 {
343  unsigned i;
344  unsigned tbl24_indexes[n];
345 
346  /* DEBUG: Check user input arguments. */
347  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
348  (next_hops == NULL)), -EINVAL);
349 
350  for (i = 0; i < n; i++) {
351  tbl24_indexes[i] = ips[i] >> 8;
352  }
353 
354  for (i = 0; i < n; i++) {
355  /* Simply copy tbl24 entry to output */
356  next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
357 
358  /* Overwrite output with tbl8 entry if needed */
359  if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
360  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
361 
362  unsigned tbl8_index = (uint8_t)ips[i] +
363  ((uint8_t)next_hops[i] *
364  RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
365 
366  next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
367  }
368  }
369  return 0;
370 }
371 
372 /* Mask four results. */
373 #define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
374 
394 static inline void
395 rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
396  uint16_t defv)
397 {
398  __m128i i24;
399  rte_xmm_t i8;
400  uint16_t tbl[4];
401  uint64_t idx, pt;
402 
403  const __m128i mask8 =
404  _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
405 
406  /*
407  * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
408  * as one 64-bit value (0x0300030003000300).
409  */
410  const uint64_t mask_xv =
411  ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
412  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
413  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
414  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
415 
416  /*
417  * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
418  * as one 64-bit value (0x0100010001000100).
419  */
420  const uint64_t mask_v =
421  ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
422  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
423  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
424  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
425 
426  /* get 4 indexes for tbl24[]. */
427  i24 = _mm_srli_epi32(ip, CHAR_BIT);
428 
429  /* extract values from tbl24[] */
430  idx = _mm_cvtsi128_si64(i24);
431  i24 = _mm_srli_si128(i24, sizeof(uint64_t));
432 
433  tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
434  tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
435 
436  idx = _mm_cvtsi128_si64(i24);
437 
438  tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
439  tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
440 
441  /* get 4 indexes for tbl8[]. */
442  i8.x = _mm_and_si128(ip, mask8);
443 
444  pt = (uint64_t)tbl[0] |
445  (uint64_t)tbl[1] << 16 |
446  (uint64_t)tbl[2] << 32 |
447  (uint64_t)tbl[3] << 48;
448 
449  /* search successfully finished for all 4 IP addresses. */
450  if (likely((pt & mask_xv) == mask_v)) {
451  uintptr_t ph = (uintptr_t)hop;
452  *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
453  return;
454  }
455 
456  if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
457  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
458  i8.u32[0] = i8.u32[0] +
459  (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
460  tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
461  }
462  if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
463  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
464  i8.u32[1] = i8.u32[1] +
465  (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
466  tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
467  }
468  if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
469  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
470  i8.u32[2] = i8.u32[2] +
471  (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
472  tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
473  }
474  if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
475  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
476  i8.u32[3] = i8.u32[3] +
477  (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
478  tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
479  }
480 
481  hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
482  hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
483  hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
484  hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
485 }
486 
487 #ifdef __cplusplus
488 }
489 #endif
490 
491 #endif /* _RTE_LPM_H_ */