/* DPDK 2.2.0 — rte_lpm.h: Longest Prefix Match (IPv4) public header. */
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #ifndef _RTE_LPM_H_
35 #define _RTE_LPM_H_
36 
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_vect.h>
51 
52 #ifdef __cplusplus
53 extern "C" {
54 #endif
55 
57 #define RTE_LPM_NAMESIZE 32
58 
60 #define RTE_LPM_MAX_DEPTH 32
61 
63 #define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
64 
66 #define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
67 
69 #define RTE_LPM_TBL8_NUM_GROUPS 256
70 
72 #define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
73  RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
74 
76 #if defined(RTE_LIBRTE_LPM_DEBUG)
77 #define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
78  if (cond) return (retval); \
79 } while (0)
80 #else
81 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
82 #endif
83 
85 #define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
86 
88 #define RTE_LPM_LOOKUP_SUCCESS 0x0100
89 
90 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
91 
92 struct rte_lpm_tbl24_entry {
93  /* Stores Next hop or group index (i.e. gindex)into tbl8. */
94  union {
95  uint8_t next_hop;
96  uint8_t tbl8_gindex;
97  };
98  /* Using single uint8_t to store 3 values. */
99  uint8_t valid :1;
100  uint8_t ext_entry :1;
101  uint8_t depth :6;
102 };
103 
105 struct rte_lpm_tbl8_entry {
106  uint8_t next_hop;
107  /* Using single uint8_t to store 3 values. */
108  uint8_t valid :1;
109  uint8_t valid_group :1;
110  uint8_t depth :6;
111 };
112 #else
113 struct rte_lpm_tbl24_entry {
114  uint8_t depth :6;
115  uint8_t ext_entry :1;
116  uint8_t valid :1;
117  union {
118  uint8_t tbl8_gindex;
119  uint8_t next_hop;
120  };
121 };
122 
123 struct rte_lpm_tbl8_entry {
124  uint8_t depth :6;
125  uint8_t valid_group :1;
126  uint8_t valid :1;
127  uint8_t next_hop;
128 };
129 #endif
130 
132 struct rte_lpm_rule {
133  uint32_t ip;
134  uint8_t next_hop;
135 };
136 
138 struct rte_lpm_rule_info {
139  uint32_t used_rules;
140  uint32_t first_rule;
141 };
142 
144 struct rte_lpm {
145  /* LPM metadata. */
146  char name[RTE_LPM_NAMESIZE];
147  uint32_t max_rules;
148  struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
150  /* LPM Tables. */
151  struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
153  struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
155  struct rte_lpm_rule rules_tbl[0] \
157 };
158 
180 struct rte_lpm *
181 rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
182 
193 struct rte_lpm *
194 rte_lpm_find_existing(const char *name);
195 
204 void
205 rte_lpm_free(struct rte_lpm *lpm);
206 
221 int
222 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
223 
239 int
240 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
241 uint8_t *next_hop);
242 
255 int
256 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
257 
264 void
265 rte_lpm_delete_all(struct rte_lpm *lpm);
266 
279 static inline int
280 rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
281 {
282  unsigned tbl24_index = (ip >> 8);
283  uint16_t tbl_entry;
284 
285  /* DEBUG: Check user input arguments. */
286  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
287 
288  /* Copy tbl24 entry */
289  tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
290 
291  /* Copy tbl8 entry (only if needed) */
292  if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
293  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
294 
295  unsigned tbl8_index = (uint8_t)ip +
296  ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
297 
298  tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
299  }
300 
301  *next_hop = (uint8_t)tbl_entry;
302  return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
303 }
304 
325 #define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
326  rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
327 
328 static inline int
329 rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
330  uint16_t * next_hops, const unsigned n)
331 {
332  unsigned i;
333  unsigned tbl24_indexes[n];
334 
335  /* DEBUG: Check user input arguments. */
336  RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
337  (next_hops == NULL)), -EINVAL);
338 
339  for (i = 0; i < n; i++) {
340  tbl24_indexes[i] = ips[i] >> 8;
341  }
342 
343  for (i = 0; i < n; i++) {
344  /* Simply copy tbl24 entry to output */
345  next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
346 
347  /* Overwrite output with tbl8 entry if needed */
348  if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
349  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
350 
351  unsigned tbl8_index = (uint8_t)ips[i] +
352  ((uint8_t)next_hops[i] *
353  RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
354 
355  next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
356  }
357  }
358  return 0;
359 }
360 
361 /* Mask four results. */
362 #define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
363 
383 static inline void
384 rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
385  uint16_t defv)
386 {
387  __m128i i24;
388  rte_xmm_t i8;
389  uint16_t tbl[4];
390  uint64_t idx, pt;
391 
392  const __m128i mask8 =
393  _mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
394 
395  /*
396  * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
397  * as one 64-bit value (0x0300030003000300).
398  */
399  const uint64_t mask_xv =
400  ((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
401  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
402  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
403  (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
404 
405  /*
406  * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
407  * as one 64-bit value (0x0100010001000100).
408  */
409  const uint64_t mask_v =
410  ((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
411  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
412  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
413  (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
414 
415  /* get 4 indexes for tbl24[]. */
416  i24 = _mm_srli_epi32(ip, CHAR_BIT);
417 
418  /* extract values from tbl24[] */
419  idx = _mm_cvtsi128_si64(i24);
420  i24 = _mm_srli_si128(i24, sizeof(uint64_t));
421 
422  tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
423  tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
424 
425  idx = _mm_cvtsi128_si64(i24);
426 
427  tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
428  tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
429 
430  /* get 4 indexes for tbl8[]. */
431  i8.x = _mm_and_si128(ip, mask8);
432 
433  pt = (uint64_t)tbl[0] |
434  (uint64_t)tbl[1] << 16 |
435  (uint64_t)tbl[2] << 32 |
436  (uint64_t)tbl[3] << 48;
437 
438  /* search successfully finished for all 4 IP addresses. */
439  if (likely((pt & mask_xv) == mask_v)) {
440  uintptr_t ph = (uintptr_t)hop;
441  *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
442  return;
443  }
444 
445  if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
446  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
447  i8.u32[0] = i8.u32[0] +
448  (uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
449  tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
450  }
451  if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
452  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
453  i8.u32[1] = i8.u32[1] +
454  (uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
455  tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
456  }
457  if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
458  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
459  i8.u32[2] = i8.u32[2] +
460  (uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
461  tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
462  }
463  if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
464  RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
465  i8.u32[3] = i8.u32[3] +
466  (uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
467  tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
468  }
469 
470  hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
471  hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
472  hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
473  hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
474 }
475 
476 #ifdef __cplusplus
477 }
478 #endif
479 
480 #endif /* _RTE_LPM_H_ */