#ifndef _RTE_LPM_NEON_H_
#define _RTE_LPM_NEON_H_
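
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * header): it shows how four IPv4 addresses packed into one NEON vector can
 * be resolved with a single rte_lpm_lookupx4() call. The table pointer `lpm`,
 * the `ips` array and the UINT32_MAX miss sentinel are assumptions for the
 * example; the addresses are assumed to be in host byte order.
 *
 *	uint32_t ips[4] = { ... };	// four IPv4 addresses to resolve
 *	uint32_t hops[4];
 *	xmm_t ipx4 = vld1q_s32((const int32_t *)ips);
 *	rte_lpm_lookupx4(lpm, ipx4, hops, UINT32_MAX);
 *	// hops[i] now holds the next-hop ID, or UINT32_MAX on a miss
 */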

#include <rte_branch_prediction.h>
#include <rte_vect.h>

/*
 * This header is included from rte_lpm.h, which provides struct rte_lpm
 * and the RTE_LPM_* constants used below.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
	uint32_t defv)
{
	uint32x4_t i24;
	rte_xmm_t i8;
	uint32_t tbl[4];
	uint64_t idx, pt, pt2;
	const uint32_t *ptbl;

	/* Mask to keep only the lowest byte of each address (the tbl8 offset). */
	const uint32_t mask = UINT8_MAX;
	const int32x4_t mask8 = vdupq_n_s32(mask);

	/*
	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for two LPM entries,
	 * replicated into both halves of a 64-bit word.
	 */
	const uint64_t mask_xv =
		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);

	/*
	 * RTE_LPM_LOOKUP_SUCCESS for two LPM entries,
	 * replicated into both halves of a 64-bit word.
	 */
	const uint64_t mask_v =
		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
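
	/*
	 * Note on the table layout (added commentary): the LPM table is a
	 * two-level DIR-24-8 structure. tbl24[] is indexed by the top 24 bits
	 * of the address and either holds the final next hop or points to a
	 * 256-entry tbl8[] group indexed by the last byte. Packing two 32-bit
	 * tbl24 entries into one 64-bit word below lets a single compare check
	 * the valid/extended flags of two lookups at once.
	 */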

	/* Get four indexes into tbl24[]: the top 24 bits of each address. */
	i24 = vshrq_n_u32((uint32x4_t)ip, CHAR_BIT);

	/* Extract the four tbl24[] entries. */
	idx = vgetq_lane_u64((uint64x2_t)i24, 0);

	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
	tbl[0] = *ptbl;
	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
	tbl[1] = *ptbl;

	idx = vgetq_lane_u64((uint64x2_t)i24, 1);

	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
	tbl[2] = *ptbl;
	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
	tbl[3] = *ptbl;

	/* Get four indexes into tbl8[]: the lowest byte of each address. */
	i8.x = vandq_s32(ip, mask8);

	/* Pack pairs of tbl24 entries into 64-bit words for combined checks. */
	pt = (uint64_t)tbl[0] |
		(uint64_t)tbl[1] << 32;
	pt2 = (uint64_t)tbl[2] |
		(uint64_t)tbl[3] << 32;

	/* Fast path: all four lookups finished in tbl24[]. */
	if (likely((pt & mask_xv) == mask_v) &&
			likely((pt2 & mask_xv) == mask_v)) {
		*(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
		*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
		return;
	}
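
	/*
	 * Slow path (added commentary): an entry whose valid and extended-entry
	 * flags are both set points into a tbl8[] group. The final entry sits
	 * at group number (low byte of the tbl24 entry) multiplied by
	 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES, plus the last byte of the address,
	 * which was saved in i8 above.
	 */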
	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[0] = i8.u32[0] +
			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
		tbl[0] = *ptbl;
	}

	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[1] = i8.u32[1] +
			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
		tbl[1] = *ptbl;
	}

	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[2] = i8.u32[2] +
			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
		tbl[2] = *ptbl;
	}

	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[3] = i8.u32[3] +
			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];