DPDK 21.11.9
rte_thash_x86_gfni.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */

#ifndef _RTE_THASH_X86_GFNI_H_
#define _RTE_THASH_X86_GFNI_H_

#include <rte_vect.h>

#ifdef __cplusplus
extern "C" {
#endif

/* The GFNI Toeplitz hash implementation below is compiled in only when
 * both the Galois Field New Instructions and AVX-512F are available.
 */
#if defined(__GFNI__) && defined(__AVX512F__)
#define RTE_THASH_GFNI_DEFINED

/* Write-masks for the masked byte permutations below. The FIRST_ITER
 * variants zero the lanes that would reference bytes before the start of
 * the tuple; the *_2 variants are the same patterns shifted to the byte
 * lanes used by the secondary tuple.
 */
#define RTE_THASH_FIRST_ITER_MSK 0x0f0f0f0f0f0e0c08
#define RTE_THASH_PERM_MSK 0x0f0f0f0f0f0f0f0f
#define RTE_THASH_FIRST_ITER_MSK_2 0xf0f0f0f0f0e0c080
#define RTE_THASH_PERM_MSK_2 0xf0f0f0f0f0f0f0f0
#define RTE_THASH_REWIND_MSK 0x0000000000113377

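/*
 * XOR-fold the 512-bit accumulator of GF(2) affine results down to two
 * 32-bit Toeplitz hash values: *val_1 for the primary tuple, *val_2 for
 * the secondary tuple (zero when no secondary tuple was hashed).
 */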
__rte_internal
static inline void
__rte_thash_xor_reduce(__m512i xor_acc, uint32_t *val_1, uint32_t *val_2)
{
        __m256i tmp_256_1, tmp_256_2;
        __m128i tmp128_1, tmp128_2;

        /* Fold 512 bits down to 256 bits. */
        tmp_256_1 = _mm512_castsi512_si256(xor_acc);
        tmp_256_2 = _mm512_extracti32x8_epi32(xor_acc, 1);
        tmp_256_1 = _mm256_xor_si256(tmp_256_1, tmp_256_2);

        /* Fold 256 bits down to 128 bits. */
        tmp128_1 = _mm256_castsi256_si128(tmp_256_1);
        tmp128_2 = _mm256_extracti32x4_epi32(tmp_256_1, 1);
        tmp128_1 = _mm_xor_si128(tmp128_1, tmp128_2);

#ifdef RTE_ARCH_X86_64
        /* Fold the two 64-bit halves; the low and high 32 bits of the
         * result are the two hash values.
         */
        uint64_t tmp_1, tmp_2;
        tmp_1 = _mm_extract_epi64(tmp128_1, 0);
        tmp_2 = _mm_extract_epi64(tmp128_1, 1);
        tmp_1 ^= tmp_2;

        *val_1 = (uint32_t)tmp_1;
        *val_2 = (uint32_t)(tmp_1 >> 32);
#else
        /* 32-bit build: 64-bit extracts are unavailable, so fold the four
         * 32-bit lanes instead.
         */
        uint32_t tmp_1, tmp_2;
        tmp_1 = _mm_extract_epi32(tmp128_1, 0);
        tmp_2 = _mm_extract_epi32(tmp128_1, 1);
        tmp_1 ^= _mm_extract_epi32(tmp128_1, 2);
        tmp_2 ^= _mm_extract_epi32(tmp128_1, 3);

        *val_1 = tmp_1;
        *val_2 = tmp_2;
#endif
}

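/*
 * Compute the Toeplitz hash of one tuple (and optionally a second tuple
 * in parallel) with the GF2P8AFFINEQB instruction. Each 64-byte chunk of
 * the tuple is permuted into overlapping 4-byte windows (each byte of the
 * result depends on a sliding 32-bit window of the input), multiplied in
 * GF(2) by the 8x8 bit-matrices pre-computed from the RSS key, and
 * XOR-accumulated. Returns the 512-bit accumulator for later reduction.
 */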
__rte_internal
static inline __m512i
__rte_thash_gfni(const uint64_t *mtrx, const uint8_t *tuple,
        const uint8_t *secondary_tuple, int len)
{
        /* Byte indexes of the overlapping 4-byte windows, duplicated for
         * the secondary tuple's lanes. 0xFF/0xFE/0xFD point before the
         * start of the tuple and are masked off on the first iteration.
         */
        __m512i permute_idx = _mm512_set_epi32(0x07060504, 0x07060504,
                        0x06050403, 0x06050403,
                        0x05040302, 0x05040302,
                        0x04030201, 0x04030201,
                        0x03020100, 0x03020100,
                        0x020100FF, 0x020100FF,
                        0x0100FFFE, 0x0100FFFE,
                        0x00FFFEFD, 0x00FFFEFD);
        /* Indexes used to carry the last 3 bytes of a fully consumed
         * chunk over into the windows of the next chunk.
         */
        const __m512i rewind_idx = _mm512_set_epi32(0x00000000, 0x00000000,
                        0x00000000, 0x00000000,
                        0x00000000, 0x00000000,
                        0x00000000, 0x00000000,
                        0x00000000, 0x00000000,
                        0x0000003B, 0x0000003B,
                        0x00003B3A, 0x00003B3A,
                        0x003B3A39, 0x003B3A39);
        const __mmask64 rewind_mask = RTE_THASH_REWIND_MSK;
        const __m512i shift_8 = _mm512_set1_epi8(8);
        __m512i xor_acc = _mm512_setzero_si512();
        __m512i perm_bytes = _mm512_setzero_si512();
        __m512i vals, matrixes, tuple_bytes_2;
        __m512i tuple_bytes = _mm512_setzero_si512();
        __mmask64 load_mask, permute_mask_2;
        __mmask64 permute_mask = 0;
        int chunk_len = 0, i = 0;
        uint8_t mtrx_msk;
        const int prepend = 3;

        /* Process the tuple(s) in chunks of up to 64 bytes. */
        for (; len > 0; len -= 64, tuple += 64) {
                /* The previous chunk was fully consumed: rewind its last
                 * 3 bytes, since every window spans 4 consecutive bytes.
                 */
                if (i == 8)
                        perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
                                        rewind_idx, perm_bytes);

                permute_mask = RTE_THASH_FIRST_ITER_MSK;
                load_mask = (len >= 64) ? UINT64_MAX : ((1ULL << len) - 1);
                tuple_bytes = _mm512_maskz_loadu_epi8(load_mask, tuple);
                if (secondary_tuple) {
                        permute_mask_2 = RTE_THASH_FIRST_ITER_MSK_2;
                        tuple_bytes_2 = _mm512_maskz_loadu_epi8(load_mask,
                                        secondary_tuple);
                }

                chunk_len = __builtin_popcountll(load_mask);
                for (i = 0; i < ((chunk_len + prepend) / 8); i++, mtrx += 8) {
                        /* Gather the 4-byte windows for this iteration. */
                        perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
                                        permute_mask, permute_idx, tuple_bytes);

                        if (secondary_tuple)
                                perm_bytes =
                                        _mm512_mask_permutexvar_epi8(perm_bytes,
                                        permute_mask_2, permute_idx,
                                        tuple_bytes_2);

                        /* Multiply the windows by the key matrices in GF(2)
                         * and accumulate with XOR.
                         */
                        matrixes = _mm512_maskz_loadu_epi64(UINT8_MAX, mtrx);
                        vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes,
                                        matrixes, 0);

                        xor_acc = _mm512_xor_si512(xor_acc, vals);
                        permute_idx = _mm512_add_epi8(permute_idx, shift_8);
                        permute_mask = RTE_THASH_PERM_MSK;
                        if (secondary_tuple)
                                permute_mask_2 = RTE_THASH_PERM_MSK_2;
                }
        }

        /* Handle the remaining (chunk_len + prepend) % 8 bytes. */
        int rest_len = (chunk_len + prepend) % 8;
        if (rest_len != 0) {
                mtrx_msk = (1 << (rest_len % 8)) - 1;
                matrixes = _mm512_maskz_loadu_epi64(mtrx_msk, mtrx);
                if (i == 8) {
                        perm_bytes = _mm512_maskz_permutexvar_epi8(rewind_mask,
                                        rewind_idx, perm_bytes);
                } else {
                        perm_bytes = _mm512_mask_permutexvar_epi8(perm_bytes,
                                        permute_mask, permute_idx, tuple_bytes);

                        if (secondary_tuple)
                                perm_bytes =
                                        _mm512_mask_permutexvar_epi8(
                                        perm_bytes, permute_mask_2,
                                        permute_idx, tuple_bytes_2);
                }

                vals = _mm512_gf2p8affine_epi64_epi8(perm_bytes, matrixes, 0);
                xor_acc = _mm512_xor_si512(xor_acc, vals);
        }

        return xor_acc;
}

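/**
 * Calculate the Toeplitz hash of a tuple using GFNI instructions.
 *
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * @param m
 *   Pointer to the matrices generated from the corresponding RSS hash key.
 * @param tuple
 *   Pointer to the data to be hashed. Data must be in network byte order.
 * @param len
 *   Length of the data to be hashed, in bytes.
 * @return
 *   Calculated Toeplitz hash value.
 */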
__rte_experimental
static inline uint32_t
rte_thash_gfni(const uint64_t *m, const uint8_t *tuple, int len)
{
        uint32_t val, val_zero;

        __m512i xor_acc = __rte_thash_gfni(m, tuple, NULL, len);
        __rte_thash_xor_reduce(xor_acc, &val, &val_zero);

        return val;
}

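/**
 * Calculate the Toeplitz hash of several tuples using GFNI instructions.
 * Tuples are hashed two at a time to use the full 512-bit register width.
 *
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * @param mtrx
 *   Pointer to the matrices generated from the corresponding RSS hash key.
 * @param len
 *   Length of each tuple to be hashed, in bytes.
 * @param tuple
 *   Array of the tuples to be hashed. Data must be in network byte order.
 * @param val
 *   Array to write the calculated hash values into.
 * @param num
 *   Number of tuples to hash.
 */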
__rte_experimental
static inline void
rte_thash_gfni_bulk(const uint64_t *mtrx, int len, uint8_t *tuple[],
        uint32_t val[], uint32_t num)
{
        uint32_t i;
        uint32_t val_zero;
        __m512i xor_acc;

        /* Hash tuples in pairs, packing two tuples per 512-bit pass. */
        for (i = 0; i != (num & ~1); i += 2) {
                xor_acc = __rte_thash_gfni(mtrx, tuple[i], tuple[i + 1], len);
                __rte_thash_xor_reduce(xor_acc, val + i, val + i + 1);
        }

        /* Odd tuple left over: hash it on its own. */
        if (num & 1) {
                xor_acc = __rte_thash_gfni(mtrx, tuple[i], NULL, len);
                __rte_thash_xor_reduce(xor_acc, val + i, &val_zero);
        }
}

#endif /* __GFNI__ && __AVX512F__ */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_THASH_X86_GFNI_H_ */
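
Usage sketch (not part of the header): a minimal example assuming a build
where RTE_THASH_GFNI_DEFINED is set, and using the experimental
rte_thash_complete_matrix() helper from rte_thash.h to expand the RSS key
into GF(2) matrices; the key contents and matrix-array sizing here are
illustrative.

#include <stdint.h>
#include <rte_thash.h>

#define RSS_KEY_LEN 40  /* classic 40-byte Toeplitz RSS key */

static uint8_t rss_key[RSS_KEY_LEN]; /* filled with the real key elsewhere */

/* Conservatively sized; see the rte_thash_complete_matrix() documentation
 * for the exact memory requirement.
 */
static uint64_t key_matrices[RSS_KEY_LEN * 8];

/* Expand the RSS key into GF(2) matrices once, at initialization time. */
static int
hash_init(void)
{
        return rte_thash_complete_matrix(key_matrices, rss_key, RSS_KEY_LEN);
}

/* Hash one tuple (e.g. an IPv4 5-tuple in network byte order). */
static uint32_t
hash_one(const uint8_t *tuple, int len)
{
        return rte_thash_gfni(key_matrices, tuple, len);
}

/* Hash a burst of equal-length tuples in one call. */
static void
hash_burst(uint8_t *tuples[], uint32_t hashes[], uint32_t n, int len)
{
        rte_thash_gfni_bulk(key_matrices, len, tuples, hashes, n);
}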