#ifndef _RTE_LPM_NEON_H_
#define _RTE_LPM_NEON_H_
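/*
 * Body of the NEON four-way LPM lookup (rte_lpm_lookupx4() in DPDK's
 * rte_lpm library): four IPv4 addresses packed in one 128-bit vector are
 * resolved against the tbl24/tbl8 tables, writing one next hop per lane.
 */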
uint64_t idx, pt, pt2;

/* mask for the low byte of each address (the index within a tbl8 group) */
const uint32_t mask = UINT8_MAX;
const int32x4_t mask8 = vdupq_n_s32(mask);
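/*
 * RTE_LPM_VALID_EXT_ENTRY_BITMASK replicated for two table entries
 * packed into one 64-bit value.
 */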
const uint64_t mask_xv =
	((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
	(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);
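/*
 * RTE_LPM_LOOKUP_SUCCESS replicated for two table entries
 * packed into one 64-bit value.
 */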
const uint64_t mask_v =
	((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
	(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
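/* get the four tbl24[] indexes: the top 24 bits of each address */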
i24 = vshrq_n_u32((uint32x4_t)ip, CHAR_BIT);
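/* extract the indexes pairwise and read the four tbl24[] entries */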
idx = vgetq_lane_u64((uint64x2_t)i24, 0);

ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
tbl[0] = *ptbl;
ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
tbl[1] = *ptbl;

idx = vgetq_lane_u64((uint64x2_t)i24, 1);

ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
tbl[2] = *ptbl;
ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
tbl[3] = *ptbl;
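/* get the four tbl8[] indexes: the low byte of each address */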
i8.x = vandq_s32(ip, mask8);

/* pack the four tbl24 entries into two 64-bit words for a combined flag test */
pt = (uint64_t)tbl[0] |
	(uint64_t)tbl[1] << 32;
pt2 = (uint64_t)tbl[2] |
	(uint64_t)tbl[3] << 32;
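/* fast path: all four entries are valid and none needs a tbl8 lookup */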
if (likely((pt & mask_xv) == mask_v) &&
		likely((pt2 & mask_xv) == mask_v)) {
	*(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
	*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
	return;
}
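/*
 * Slow path: any entry with the valid-extended flag set points into a
 * tbl8 group; redo that lane's lookup in tbl8[].
 */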
if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
		RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
	i8.u32[0] = i8.u32[0] +
		(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
	tbl[0] = *ptbl;
}
if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
		RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
	i8.u32[1] = i8.u32[1] +
		(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
	tbl[1] = *ptbl;
}
if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
		RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
	i8.u32[2] = i8.u32[2] +
		(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
	tbl[2] = *ptbl;
}
if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
		RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
	i8.u32[3] = i8.u32[3] +
		(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
	tbl[3] = *ptbl;
}
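/*
 * Example call (a minimal sketch; "lpm" is assumed to have been built
 * elsewhere with rte_lpm_create()/rte_lpm_add(), and "addrs" is a
 * hypothetical array of four host-order IPv4 addresses):
 *
 *	uint32_t hop[4];
 *	xmm_t ip = vld1q_s32((const int32_t *)addrs);
 *
 *	rte_lpm_lookupx4(lpm, ip, hop, UINT32_MAX);
 */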