#if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON)
#include <rte_vect.h>
#endif

#ifdef RTE_ARCH_X86
/* Shuffle mask that byte-swaps each 4-byte chunk of an IPv6 address */
static const __m128i rte_thash_ipv6_bswap_mask = {
		0x0405060700010203ULL, 0x0C0D0E0F08090A0BULL};
#endif
/* Length in dwords of the input tuple for an IPv4 L3-only hash */
#define RTE_THASH_V4_L3_LEN	((sizeof(struct rte_ipv4_tuple) -	\
			sizeof(((struct rte_ipv4_tuple *)0)->sctp_tag)) / 4)

/* Length in dwords of the input tuple for an IPv4 L3 + L4 hash */
#define RTE_THASH_V4_L4_LEN	((sizeof(struct rte_ipv4_tuple)) / 4)

/* Length in dwords of the input tuple for an IPv6 L3-only hash */
#define RTE_THASH_V6_L3_LEN	((sizeof(struct rte_ipv6_tuple) -	\
			sizeof(((struct rte_ipv6_tuple *)0)->sctp_tag)) / 4)

/* Length in dwords of the input tuple for an IPv6 L3 + L4 hash */
#define RTE_THASH_V6_L4_LEN	((sizeof(struct rte_ipv6_tuple)) / 4)
/* IPv6 tuple: addresses and ports/sctp_tag must be in CPU byte order */
struct rte_ipv6_tuple {
	uint8_t src_addr[16];
	uint8_t dst_addr[16];
	union {
		struct {
			uint16_t dport;
			uint16_t sport;
		};
		uint32_t sctp_tag;
	};
};

union rte_thash_tuple {
	struct rte_ipv4_tuple v4;
	struct rte_ipv6_tuple v6;
} __attribute__((aligned(XMM_SIZE)));
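
/*
 * Sanity-check sketch (assumed, not part of the original header): with the
 * tuple layouts above (and struct rte_ipv4_tuple defined earlier as two
 * 32-bit addresses plus the ports/SCTP-tag union), the length macros work
 * out to 2/3 dwords for IPv4 and 8/9 dwords for IPv6.
 */
_Static_assert(RTE_THASH_V4_L3_LEN == 2, "IPv4 L3 tuple is 2 dwords");
_Static_assert(RTE_THASH_V4_L4_LEN == 3, "IPv4 L3+L4 tuple is 3 dwords");
_Static_assert(RTE_THASH_V6_L3_LEN == 8, "IPv6 L3 tuple is 8 dwords");
_Static_assert(RTE_THASH_V6_L4_LEN == 9, "IPv6 L3+L4 tuple is 9 dwords");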
/* Convert an RSS key (len bytes) into the form expected by rte_softrss_be() */
static inline void
rte_convert_rss_key(const uint32_t *orig, uint32_t *targ, int len)
{
	int i;

	for (i = 0; i < (len >> 2); i++)
		targ[i] = rte_be_to_cpu_32(orig[i]);
}
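
/*
 * Usage sketch (assumed, not part of the original header): convert the key
 * once at setup so the optimized rte_softrss_be() can be used on the fast
 * path. The 40-byte key length and the idea that the key comes from the
 * application or NIC configuration are illustrative assumptions.
 */
static inline void
example_prepare_key(const uint8_t *nic_key, uint32_t key_be[10])
{
	/* 40 bytes == 10 dwords; the length argument is in bytes */
	rte_convert_rss_key((const uint32_t *)(const void *)nic_key,
			key_be, 40);
}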
/*
 * Load the IPv6 source and destination addresses from the packet header into
 * the target tuple, converting each 4-byte chunk to CPU byte order.
 */
static inline void
rte_thash_load_v6_addrs(const struct ipv6_hdr *orig,
			union rte_thash_tuple *targ)
{
#ifdef RTE_ARCH_X86
	__m128i ipv6 = _mm_loadu_si128((const __m128i *)orig->src_addr);
	*(__m128i *)targ->v6.src_addr =
			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
	ipv6 = _mm_loadu_si128((const __m128i *)orig->dst_addr);
	*(__m128i *)targ->v6.dst_addr =
			_mm_shuffle_epi8(ipv6, rte_thash_ipv6_bswap_mask);
#elif defined(RTE_MACHINE_CPUFLAG_NEON)
	uint8x16_t ipv6 = vld1q_u8((uint8_t const *)orig->src_addr);
	vst1q_u8((uint8_t *)targ->v6.src_addr, vrev32q_u8(ipv6));
	ipv6 = vld1q_u8((uint8_t const *)orig->dst_addr);
	vst1q_u8((uint8_t *)targ->v6.dst_addr, vrev32q_u8(ipv6));
#else
	int i;

	for (i = 0; i < 4; i++) {
		*((uint32_t *)targ->v6.src_addr + i) =
			rte_be_to_cpu_32(*((const uint32_t *)orig->src_addr + i));
		*((uint32_t *)targ->v6.dst_addr + i) =
			rte_be_to_cpu_32(*((const uint32_t *)orig->dst_addr + i));
	}
#endif
}
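
/*
 * Usage sketch (assumed, not part of the original header): pull the IPv6
 * addresses of a received packet into the tuple. Assumes an untagged
 * Ethernet/IPv6 frame and that rte_mbuf.h, rte_ether.h and rte_ip.h are
 * included; error handling is omitted.
 */
static inline void
example_load_v6_mbuf(struct rte_mbuf *m, union rte_thash_tuple *tuple)
{
	const struct ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
			const struct ipv6_hdr *, sizeof(struct ether_hdr));

	rte_thash_load_v6_addrs(ip6, tuple);
	/* The tuple can now be hashed with rte_softrss()/rte_softrss_be()
	 * below, e.g. with length RTE_THASH_V6_L3_LEN for an addresses-only
	 * hash. */
}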
/* Software Toeplitz (RSS) hash, generic version: works with the original RSS key */
static inline uint32_t
rte_softrss(uint32_t *input_tuple, uint32_t input_len,
		const uint8_t *rss_key)
{
	uint32_t i, j, ret = 0;

	for (j = 0; j < input_len; j++) {
		for (i = 0; i < 32; i++) {
			if (input_tuple[j] & (1 << (31 - i))) {
				/* XOR in the 32-bit key window starting at bit i of key word j */
				ret ^= rte_cpu_to_be_32(((const uint32_t *)rss_key)[j]) << i |
					(uint32_t)((uint64_t)(rte_cpu_to_be_32(((const uint32_t *)rss_key)[j + 1])) >>
					(32 - i));
			}
		}
	}
	return ret;
}
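
/*
 * Usage sketch (assumed, not part of the original header): hash an IPv4/TCP
 * 4-tuple with the generic routine. The rte_ipv4_tuple field names
 * (src_addr, dst_addr, sport, dport) are assumed from the layout implied by
 * the macros above; all values must be in CPU byte order. rss_key must
 * provide at least (RTE_THASH_V4_L4_LEN + 1) dwords, since the inner loop
 * reads one key word past the last tuple word.
 */
static inline uint32_t
example_softrss_v4(uint32_t sip, uint32_t dip, uint16_t sport, uint16_t dport,
		const uint8_t *rss_key)
{
	union rte_thash_tuple tuple;

	tuple.v4.src_addr = sip;
	tuple.v4.dst_addr = dip;
	tuple.v4.sport = sport;
	tuple.v4.dport = dport;
	return rte_softrss((uint32_t *)&tuple.v4, RTE_THASH_V4_L4_LEN, rss_key);
}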
/*
 * Optimized version: the key must first be converted with
 * rte_convert_rss_key(); the per-bit byte swaps are then avoided.
 */
static inline uint32_t
rte_softrss_be(uint32_t *input_tuple, uint32_t input_len,
		const uint8_t *rss_key)
{
	uint32_t i, j, ret = 0;

	for (j = 0; j < input_len; j++) {
		for (i = 0; i < 32; i++) {
			if (input_tuple[j] & (1 << (31 - i))) {
				ret ^= ((const uint32_t *)rss_key)[j] << i |
					(uint32_t)((uint64_t)(((const uint32_t *)rss_key)[j + 1]) >> (32 - i));
			}
		}
	}
	return ret;
}
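
/*
 * Usage sketch (assumed, not part of the original header): given a key
 * already converted with rte_convert_rss_key() (e.g. by example_prepare_key()
 * above), the optimized routine returns the same value as rte_softrss() on
 * the original key, while avoiding the per-bit byte swaps in the inner loop.
 */
static inline int
example_softrss_be_matches(uint32_t *tuple, uint32_t tuple_len,
		const uint8_t *orig_key, const uint8_t *key_be)
{
	return rte_softrss(tuple, tuple_len, orig_key) ==
		rte_softrss_be(tuple, tuple_len, key_be);
}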