4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 27 #define TRACE(...) printf(__VA_ARGS__) 35 #define ntoh64(x) rte_be_to_cpu_64(x) 36 #define hton64(x) rte_cpu_to_be_64(x) 49 TAILQ_ENTRY(struct_type) node;
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
116 struct mirroring_session {
119 uint32_t truncation_length;
125 struct extern_type_member_func {
126 TAILQ_ENTRY(extern_type_member_func) node;
132 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
135 TAILQ_ENTRY(extern_type) node;
137 struct struct_type *mailbox_struct_type;
140 struct extern_type_member_func_tailq funcs;
144 TAILQ_HEAD(extern_type_tailq, extern_type);
147 TAILQ_ENTRY(extern_obj) node;
149 struct extern_type *type;
155 TAILQ_HEAD(extern_obj_tailq, extern_obj);
157 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 158 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8 161 struct extern_obj_runtime {
171 TAILQ_ENTRY(extern_func) node;
173 struct struct_type *mailbox_struct_type;
179 TAILQ_HEAD(extern_func_tailq, extern_func);
181 struct extern_func_runtime {
190 TAILQ_ENTRY(hash_func) node;
196 TAILQ_HEAD(hash_func_tailq, hash_func);
198 struct hash_func_runtime {
206 TAILQ_ENTRY(rss) node;
211 TAILQ_HEAD(rss_tailq, rss);
222 TAILQ_ENTRY(header) node;
224 struct struct_type *st;
229 TAILQ_HEAD(header_tailq, header);
231 struct header_runtime {
236 struct header_out_runtime {
262 enum instruction_type {
320 INSTR_HDR_INVALIDATE,
373 INSTR_ALU_CKADD_FIELD,
374 INSTR_ALU_CKADD_STRUCT20,
375 INSTR_ALU_CKADD_STRUCT,
381 INSTR_ALU_CKSUB_FIELD,
439 INSTR_REGPREFETCH_RH,
440 INSTR_REGPREFETCH_RM,
441 INSTR_REGPREFETCH_RI,
519 INSTR_LEARNER_REARM_NEW,
522 INSTR_LEARNER_FORGET,
581 INSTR_JMP_ACTION_HIT,
586 INSTR_JMP_ACTION_MISS,
639 struct instr_operand {
660 uint8_t header_id[8];
661 uint8_t struct_id[8];
666 struct instr_hdr_validity {
677 uint8_t mf_first_arg_offset;
678 uint8_t mf_timeout_id_offset;
679 uint8_t mf_timeout_id_n_bits;
682 struct instr_extern_obj {
687 struct instr_extern_func {
691 struct instr_hash_func {
692 uint8_t hash_func_id;
721 struct instr_dst_src {
722 struct instr_operand dst;
724 struct instr_operand src;
729 struct instr_regarray {
734 struct instr_operand idx;
739 struct instr_operand dstsrc;
749 struct instr_operand idx;
753 struct instr_operand length;
756 struct instr_operand color_in;
757 uint32_t color_in_val;
760 struct instr_operand color_out;
765 uint8_t header_id[8];
766 uint8_t struct_id[8];
777 struct instruction *ip;
780 struct instr_operand a;
786 struct instr_operand b;
792 enum instruction_type type;
795 struct instr_dst_src mirror;
796 struct instr_hdr_validity valid;
797 struct instr_dst_src mov;
798 struct instr_regarray regarray;
799 struct instr_meter meter;
800 struct instr_dma dma;
801 struct instr_dst_src alu;
802 struct instr_table table;
803 struct instr_learn learn;
804 struct instr_extern_obj ext_obj;
805 struct instr_extern_func ext_func;
806 struct instr_hash_func hash_func;
807 struct instr_rss rss;
808 struct instr_jmp jmp;
812 struct instruction_data {
819 typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
825 (*action_func_t)(
struct rte_swx_pipeline *p);
828 TAILQ_ENTRY(action) node;
830 struct struct_type *st;
831 int *args_endianness;
832 struct instruction *instructions;
833 struct instruction_data *instruction_data;
834 uint32_t n_instructions;
838 TAILQ_HEAD(action_tailq, action);
844 TAILQ_ENTRY(table_type) node;
850 TAILQ_HEAD(table_type_tailq, table_type);
858 TAILQ_ENTRY(table) node;
861 struct table_type *type;
864 struct match_field *fields;
866 struct header *header;
869 struct action **actions;
870 struct action *default_action;
871 uint8_t *default_action_data;
873 int default_action_is_const;
874 uint32_t action_data_size_max;
875 int *action_is_for_table_entries;
876 int *action_is_for_default_entry;
878 struct hash_func *hf;
883 TAILQ_HEAD(table_tailq, table);
885 struct table_runtime {
891 struct table_statistics {
892 uint64_t n_pkts_hit[2];
893 uint64_t *n_pkts_action;
900 TAILQ_ENTRY(selector) node;
903 struct field *group_id_field;
904 struct field **selector_fields;
905 uint32_t n_selector_fields;
906 struct header *selector_header;
907 struct field *member_id_field;
909 uint32_t n_groups_max;
910 uint32_t n_members_per_group_max;
915 TAILQ_HEAD(selector_tailq, selector);
917 struct selector_runtime {
919 uint8_t **group_id_buffer;
920 uint8_t **selector_buffer;
921 uint8_t **member_id_buffer;
924 struct selector_statistics {
932 TAILQ_ENTRY(learner) node;
936 struct field **fields;
938 struct header *header;
941 struct action **actions;
942 struct action *default_action;
943 uint8_t *default_action_data;
945 int default_action_is_const;
946 uint32_t action_data_size_max;
947 int *action_is_for_table_entries;
948 int *action_is_for_default_entry;
950 struct hash_func *hf;
957 TAILQ_HEAD(learner_tailq, learner);
959 struct learner_runtime {
964 struct learner_statistics {
965 uint64_t n_pkts_hit[2];
966 uint64_t n_pkts_learn[2];
967 uint64_t n_pkts_rearm;
968 uint64_t n_pkts_forget;
969 uint64_t *n_pkts_action;
976 TAILQ_ENTRY(regarray) node;
983 TAILQ_HEAD(regarray_tailq, regarray);
985 struct regarray_runtime {
993 struct meter_profile {
994 TAILQ_ENTRY(meter_profile) node;
997 struct rte_meter_trtcm_profile profile;
1001 TAILQ_HEAD(meter_profile_tailq, meter_profile);
1004 TAILQ_ENTRY(metarray) node;
1010 TAILQ_HEAD(metarray_tailq, metarray);
1014 struct meter_profile *profile;
1022 struct metarray_runtime {
1023 struct meter *metarray;
1034 uint32_t *mirroring_slots;
1035 uint64_t mirroring_slots_mask;
1037 uint32_t recirc_pass_id;
1043 struct header_runtime *headers;
1044 struct header_out_runtime *headers_out;
1045 uint8_t *header_storage;
1046 uint8_t *header_out_storage;
1047 uint64_t valid_headers;
1048 uint32_t n_headers_out;
1054 struct table_runtime *tables;
1055 struct selector_runtime *selectors;
1056 struct learner_runtime *learners;
1061 uint32_t learner_id;
1065 struct extern_obj_runtime *extern_objs;
1066 struct extern_func_runtime *extern_funcs;
1069 struct instruction *ip;
1070 struct instruction *ret;
1073 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos))) 1074 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos))) 1075 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos))) 1077 #define HEADER_VALID(thread, header_id) \ 1078 MASK64_BIT_GET((thread)->valid_headers, header_id) 1080 static inline uint64_t
1081 instr_operand_hbo(
struct thread *t,
const struct instr_operand *x)
1083 uint8_t *x_struct = t->structs[x->struct_id];
1084 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1085 uint64_t x64 = *x64_ptr;
1086 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
1088 return x64 & x64_mask;
1091 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1093 static inline uint64_t
1094 instr_operand_nbo(
struct thread *t,
const struct instr_operand *x)
1096 uint8_t *x_struct = t->structs[x->struct_id];
1097 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1098 uint64_t x64 = *x64_ptr;
1100 return ntoh64(x64) >> (64 - x->n_bits);
1105 #define instr_operand_nbo instr_operand_hbo 1109 #define ALU(thread, ip, operator) \ 1111 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1112 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1113 uint64_t dst64 = *dst64_ptr; \ 1114 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1115 uint64_t dst = dst64 & dst64_mask; \ 1117 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1118 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1119 uint64_t src64 = *src64_ptr; \ 1120 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1121 uint64_t src = src64 & src64_mask; \ 1123 uint64_t result = dst operator src; \ 1125 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1128 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1130 #define ALU_MH(thread, ip, operator) \ 1132 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1133 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1134 uint64_t dst64 = *dst64_ptr; \ 1135 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1136 uint64_t dst = dst64 & dst64_mask; \ 1138 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1139 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1140 uint64_t src64 = *src64_ptr; \ 1141 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1143 uint64_t result = dst operator src; \ 1145 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1148 #define ALU_HM(thread, ip, operator) \ 1150 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1151 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1152 uint64_t dst64 = *dst64_ptr; \ 1153 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1154 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1156 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1157 
uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1158 uint64_t src64 = *src64_ptr; \ 1159 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1160 uint64_t src = src64 & src64_mask; \ 1162 uint64_t result = dst operator src; \ 1163 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1165 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1168 #define ALU_HM_FAST(thread, ip, operator) \ 1170 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1171 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1172 uint64_t dst64 = *dst64_ptr; \ 1173 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1174 uint64_t dst = dst64 & dst64_mask; \ 1176 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1177 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1178 uint64_t src64 = *src64_ptr; \ 1179 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1180 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \ 1182 uint64_t result = dst operator src; \ 1184 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1187 #define ALU_HH(thread, ip, operator) \ 1189 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1190 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1191 uint64_t dst64 = *dst64_ptr; \ 1192 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1193 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1195 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1196 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1197 uint64_t src64 = *src64_ptr; \ 1198 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1200 uint64_t result = dst operator src; \ 1201 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1203 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1206 #define ALU_HH_FAST(thread, ip, operator) \ 1208 
uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1209 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1210 uint64_t dst64 = *dst64_ptr; \ 1211 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1212 uint64_t dst = dst64 & dst64_mask; \ 1214 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1215 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1216 uint64_t src64 = *src64_ptr; \ 1217 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \ 1219 uint64_t result = dst operator src; \ 1221 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1228 #define ALU_HM_FAST ALU 1230 #define ALU_HH_FAST ALU 1234 #define ALU_I(thread, ip, operator) \ 1236 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1237 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1238 uint64_t dst64 = *dst64_ptr; \ 1239 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1240 uint64_t dst = dst64 & dst64_mask; \ 1242 uint64_t src = (ip)->alu.src_val; \ 1244 uint64_t result = dst operator src; \ 1246 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1249 #define ALU_MI ALU_I 1251 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1253 #define ALU_HI(thread, ip, operator) \ 1255 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1256 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1257 uint64_t dst64 = *dst64_ptr; \ 1258 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1259 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1261 uint64_t src = (ip)->alu.src_val; \ 1263 uint64_t result = dst operator src; \ 1264 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1266 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1271 #define ALU_HI ALU_I 1275 #define MOV(thread, ip) \ 1277 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1278 
uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1279 uint64_t dst64 = *dst64_ptr; \ 1280 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1282 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1283 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1284 uint64_t src64 = *src64_ptr; \ 1285 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1286 uint64_t src = src64 & src64_mask; \ 1288 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1291 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1293 #define MOV_MH(thread, ip) \ 1295 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1296 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1297 uint64_t dst64 = *dst64_ptr; \ 1298 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1300 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1301 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1302 uint64_t src64 = *src64_ptr; \ 1303 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \ 1305 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1308 #define MOV_HM(thread, ip) \ 1310 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1311 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1312 uint64_t dst64 = *dst64_ptr; \ 1313 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1315 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1316 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1317 uint64_t src64 = *src64_ptr; \ 1318 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1319 uint64_t src = src64 & src64_mask; \ 1321 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \ 1322 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1325 #define MOV_HH(thread, ip) \ 1327 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 
1328 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1329 uint64_t dst64 = *dst64_ptr; \ 1330 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1332 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1333 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1334 uint64_t src64 = *src64_ptr; \ 1336 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \ 1337 src = src >> (64 - (ip)->mov.dst.n_bits); \ 1338 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1349 #define MOV_I(thread, ip) \ 1351 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1352 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1353 uint64_t dst64 = *dst64_ptr; \ 1354 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1356 uint64_t src = (ip)->mov.src_val; \ 1358 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1361 #define JMP_CMP(thread, ip, operator) \ 1363 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1364 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1365 uint64_t a64 = *a64_ptr; \ 1366 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1367 uint64_t a = a64 & a64_mask; \ 1369 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1370 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1371 uint64_t b64 = *b64_ptr; \ 1372 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1373 uint64_t b = b64 & b64_mask; \ 1375 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1378 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1380 #define JMP_CMP_MH(thread, ip, operator) \ 1382 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1383 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1384 uint64_t a64 = *a64_ptr; \ 1385 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1386 uint64_t a = a64 & a64_mask; \ 1388 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1389 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1390 uint64_t b64 = *b64_ptr; \ 1391 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1393 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1396 #define JMP_CMP_HM(thread, ip, operator) \ 1398 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1399 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1400 uint64_t a64 = *a64_ptr; \ 1401 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1403 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1404 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1405 uint64_t b64 = *b64_ptr; \ 1406 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1407 uint64_t b = b64 & b64_mask; \ 1409 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1412 #define JMP_CMP_HH(thread, ip, operator) \ 1414 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1415 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1416 uint64_t a64 = *a64_ptr; \ 1417 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1419 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1420 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1421 uint64_t b64 = *b64_ptr; \ 1422 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1424 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1427 #define JMP_CMP_HH_FAST(thread, ip, operator) \ 1429 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1430 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1431 uint64_t a64 = *a64_ptr; \ 1432 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \ 1434 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1435 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1436 uint64_t b64 = *b64_ptr; \ 1437 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \ 1439 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1444 #define JMP_CMP_MH JMP_CMP 1445 #define JMP_CMP_HM JMP_CMP 1446 #define JMP_CMP_HH JMP_CMP 1447 #define JMP_CMP_HH_FAST JMP_CMP 1451 #define JMP_CMP_I(thread, ip, operator) \ 1453 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1454 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1455 uint64_t a64 = *a64_ptr; \ 1456 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1457 uint64_t a = a64 & a64_mask; \ 1459 uint64_t b = (ip)->jmp.b_val; \ 1461 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1464 #define JMP_CMP_MI JMP_CMP_I 1466 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1468 #define JMP_CMP_HI(thread, ip, operator) \ 1470 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1471 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1472 uint64_t a64 = *a64_ptr; \ 1473 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1475 uint64_t b = (ip)->jmp.b_val; \ 1477 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1482 #define JMP_CMP_HI JMP_CMP_I 1486 #define METADATA_READ(thread, offset, n_bits) \ 1488 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1489 uint64_t m64 = *m64_ptr; \ 1490 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1494 #define METADATA_WRITE(thread, offset, n_bits, value) \ 1496 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1497 uint64_t m64 = *m64_ptr; \ 1498 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1500 uint64_t m_new = value; \ 1502 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \ 1505 #ifndef RTE_SWX_PIPELINE_THREADS_MAX 1506 #define RTE_SWX_PIPELINE_THREADS_MAX 16 1509 #ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1510 #define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1024 1513 struct rte_swx_pipeline {
1516 struct struct_type_tailq struct_types;
1517 struct port_in_type_tailq port_in_types;
1518 struct port_in_tailq ports_in;
1519 struct port_out_type_tailq port_out_types;
1520 struct port_out_tailq ports_out;
1521 struct extern_type_tailq extern_types;
1522 struct extern_obj_tailq extern_objs;
1523 struct extern_func_tailq extern_funcs;
1524 struct hash_func_tailq hash_funcs;
1525 struct rss_tailq rss;
1526 struct header_tailq headers;
1527 struct struct_type *metadata_st;
1528 uint32_t metadata_struct_id;
1529 struct action_tailq actions;
1530 struct table_type_tailq table_types;
1531 struct table_tailq tables;
1532 struct selector_tailq selectors;
1533 struct learner_tailq learners;
1534 struct regarray_tailq regarrays;
1535 struct meter_profile_tailq meter_profiles;
1536 struct metarray_tailq metarrays;
1538 struct port_in_runtime *in;
1539 struct port_out_runtime *out;
1540 struct mirroring_session *mirroring_sessions;
1541 struct instruction **action_instructions;
1542 action_func_t *action_funcs;
1544 struct table_statistics *table_stats;
1545 struct selector_statistics *selector_stats;
1546 struct learner_statistics *learner_stats;
1547 struct hash_func_runtime *hash_func_runtime;
1548 struct rss_runtime **rss_runtime;
1549 struct regarray_runtime *regarray_runtime;
1550 struct metarray_runtime *metarray_runtime;
1551 struct instruction *instructions;
1552 struct instruction_data *instruction_data;
1553 instr_exec_t *instruction_table;
1554 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1558 uint32_t n_ports_in;
1559 uint32_t n_ports_out;
1560 uint32_t n_mirroring_slots;
1561 uint32_t n_mirroring_sessions;
1562 uint32_t n_extern_objs;
1563 uint32_t n_extern_funcs;
1564 uint32_t n_hash_funcs;
1568 uint32_t n_selectors;
1569 uint32_t n_learners;
1570 uint32_t n_regarrays;
1571 uint32_t n_metarrays;
1575 uint32_t n_instructions;
1584 pipeline_port_inc(
struct rte_swx_pipeline *p)
1586 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1590 thread_ip_reset(
struct rte_swx_pipeline *p,
struct thread *t)
1592 t->ip = p->instructions;
1596 thread_ip_set(
struct thread *t,
struct instruction *ip)
1602 thread_ip_action_call(
struct rte_swx_pipeline *p,
1607 t->ip = p->action_instructions[action_id];
1611 thread_ip_inc(
struct rte_swx_pipeline *p);
1614 thread_ip_inc(
struct rte_swx_pipeline *p)
1616 struct thread *t = &p->threads[p->thread_id];
1622 thread_ip_inc_cond(
struct thread *t,
int cond)
1628 thread_yield(
struct rte_swx_pipeline *p)
1630 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1634 thread_yield_cond(
struct rte_swx_pipeline *p,
int cond)
1636 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1643 __instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1645 struct port_in_runtime *port = &p->in[p->port_id];
1650 if (t->recirculate) {
1651 TRACE(
"[Thread %2u] rx - recirculate (pass %u)\n",
1653 t->recirc_pass_id + 1);
1657 t->mirroring_slots_mask = 0;
1659 t->recirc_pass_id++;
1662 t->valid_headers = 0;
1663 t->n_headers_out = 0;
1666 t->table_state = p->table_state;
1672 pkt_received = port->pkt_rx(port->obj, pkt);
1676 TRACE(
"[Thread %2u] rx %s from port %u\n",
1678 pkt_received ?
"1 pkt" :
"0 pkts",
1681 t->mirroring_slots_mask = 0;
1682 t->recirc_pass_id = 0;
1685 t->valid_headers = 0;
1686 t->n_headers_out = 0;
1689 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1692 t->table_state = p->table_state;
1695 pipeline_port_inc(p);
1697 return pkt_received;
1701 instr_rx_exec(
struct rte_swx_pipeline *p)
1703 struct thread *t = &p->threads[p->thread_id];
1704 struct instruction *ip = t->ip;
1708 pkt_received = __instr_rx_exec(p, t, ip);
1711 thread_ip_inc_cond(t, pkt_received);
1719 emit_handler(
struct thread *t)
1721 struct header_out_runtime *h0 = &t->headers_out[0];
1722 struct header_out_runtime *h1 = &t->headers_out[1];
1723 uint32_t offset = 0, i;
1726 if ((t->n_headers_out == 1) &&
1727 (h0->ptr + h0->n_bytes == t->ptr)) {
1728 TRACE(
"Emit handler: no header change or header decap.\n");
1730 t->pkt.offset -= h0->n_bytes;
1731 t->pkt.length += h0->n_bytes;
1737 if ((t->n_headers_out == 2) &&
1738 (h1->ptr + h1->n_bytes == t->ptr) &&
1739 (h0->ptr == h0->ptr0)) {
1742 TRACE(
"Emit handler: header encapsulation.\n");
1744 offset = h0->n_bytes + h1->n_bytes;
1745 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1746 t->pkt.offset -= offset;
1747 t->pkt.length += offset;
1753 TRACE(
"Emit handler: complex case.\n");
1755 for (i = 0; i < t->n_headers_out; i++) {
1756 struct header_out_runtime *h = &t->headers_out[i];
1758 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1759 offset += h->n_bytes;
1763 memcpy(t->ptr - offset, t->header_out_storage, offset);
1764 t->pkt.offset -= offset;
1765 t->pkt.length += offset;
1770 mirroring_handler(
struct rte_swx_pipeline *p,
struct thread *t,
struct rte_swx_pkt *pkt)
1772 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1775 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1776 if (slot_mask & slots_mask) {
1777 struct port_out_runtime *port;
1778 struct mirroring_session *session;
1779 uint32_t port_id, session_id;
1781 session_id = t->mirroring_slots[slot_id];
1782 session = &p->mirroring_sessions[session_id];
1784 port_id = session->port_id;
1785 port = &p->out[port_id];
1787 if (session->fast_clone)
1788 port->pkt_fast_clone_tx(port->obj, pkt);
1790 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
1792 slots_mask &= ~slot_mask;
1797 __instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1799 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1800 struct port_out_runtime *port = &p->out[port_id];
1804 if (t->recirculate) {
1805 TRACE(
"[Thread %2u]: tx 1 pkt - recirculate\n",
1812 mirroring_handler(p, t, pkt);
1817 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1825 mirroring_handler(p, t, pkt);
1826 port->pkt_tx(port->obj, pkt);
1830 __instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1832 uint64_t port_id = ip->io.io.val;
1833 struct port_out_runtime *port = &p->out[port_id];
1837 if (t->recirculate) {
1838 TRACE(
"[Thread %2u]: tx (i) 1 pkt - recirculate\n",
1845 mirroring_handler(p, t, pkt);
1850 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1858 mirroring_handler(p, t, pkt);
1859 port->pkt_tx(port->obj, pkt);
1863 __instr_drop_exec(
struct rte_swx_pipeline *p,
1867 uint64_t port_id = p->n_ports_out - 1;
1868 struct port_out_runtime *port = &p->out[port_id];
1871 TRACE(
"[Thread %2u]: drop 1 pkt\n",
1878 mirroring_handler(p, t, pkt);
1879 port->pkt_tx(port->obj, pkt);
1883 __instr_mirror_exec(
struct rte_swx_pipeline *p,
1885 const struct instruction *ip)
1887 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1888 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
1890 slot_id &= p->n_mirroring_slots - 1;
1891 session_id &= p->n_mirroring_sessions - 1;
1893 TRACE(
"[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1896 (uint32_t)session_id);
1898 t->mirroring_slots[slot_id] = session_id;
1899 t->mirroring_slots_mask |= 1LLU << slot_id;
1903 __instr_recirculate_exec(
struct rte_swx_pipeline *p __rte_unused,
1905 const struct instruction *ip __rte_unused)
1907 TRACE(
"[Thread %2u]: recirculate\n",
1914 __instr_recircid_exec(
struct rte_swx_pipeline *p __rte_unused,
1916 const struct instruction *ip)
1918 TRACE(
"[Thread %2u]: recircid (pass %u)\n",
1923 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
1930 __instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p __rte_unused,
1932 const struct instruction *ip,
1935 uint64_t valid_headers = t->valid_headers;
1936 uint8_t *ptr = t->ptr;
1937 uint32_t offset = t->pkt.offset;
1938 uint32_t
length = t->pkt.length;
1941 for (i = 0; i < n_extract; i++) {
1942 uint32_t header_id = ip->io.hdr.header_id[i];
1943 uint32_t struct_id = ip->io.hdr.struct_id[i];
1944 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1946 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
1952 t->structs[struct_id] = ptr;
1953 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1962 t->valid_headers = valid_headers;
1971 __instr_hdr_extract_exec(
struct rte_swx_pipeline *p,
1973 const struct instruction *ip)
1975 __instr_hdr_extract_many_exec(p, t, ip, 1);
1979 __instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
1981 const struct instruction *ip)
1983 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1985 __instr_hdr_extract_many_exec(p, t, ip, 2);
1989 __instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
1991 const struct instruction *ip)
1993 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1995 __instr_hdr_extract_many_exec(p, t, ip, 3);
1999 __instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
2001 const struct instruction *ip)
2003 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2005 __instr_hdr_extract_many_exec(p, t, ip, 4);
2009 __instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
2011 const struct instruction *ip)
2013 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2015 __instr_hdr_extract_many_exec(p, t, ip, 5);
2019 __instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
2021 const struct instruction *ip)
2023 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2025 __instr_hdr_extract_many_exec(p, t, ip, 6);
2029 __instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
2031 const struct instruction *ip)
2033 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2035 __instr_hdr_extract_many_exec(p, t, ip, 7);
2039 __instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
2041 const struct instruction *ip)
2043 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2045 __instr_hdr_extract_many_exec(p, t, ip, 8);
2049 __instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p __rte_unused,
2051 const struct instruction *ip)
2053 uint64_t valid_headers = t->valid_headers;
2054 uint8_t *ptr = t->ptr;
2055 uint32_t offset = t->pkt.offset;
2056 uint32_t length = t->pkt.length;
2058 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2059 uint32_t header_id = ip->io.hdr.header_id[0];
2060 uint32_t struct_id = ip->io.hdr.struct_id[0];
2061 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
2063 struct header_runtime *h = &t->headers[header_id];
2065 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
2071 n_bytes += n_bytes_last;
2074 t->structs[struct_id] = ptr;
2075 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2076 h->n_bytes = n_bytes;
2079 t->pkt.offset = offset + n_bytes;
2080 t->pkt.length = length - n_bytes;
2081 t->ptr = ptr + n_bytes;
/*
 * Look ahead at the next header: map the header struct onto the current
 * packet pointer and mark it valid WITHOUT advancing the packet
 * pointer/offset/length (contrast with the extract instructions above).
 */
2085 __instr_hdr_lookahead_exec(
struct rte_swx_pipeline *p __rte_unused,
2087 const struct instruction *ip)
2089 uint64_t valid_headers = t->valid_headers;
2090 uint8_t *ptr = t->ptr;
2092 uint32_t header_id = ip->io.hdr.header_id[0];
2093 uint32_t struct_id = ip->io.hdr.struct_id[0];
2095 TRACE(
"[Thread %2u]: lookahead header %u\n",
2100 t->structs[struct_id] = ptr;
2101 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Queue n_emit headers for emission into the output header list
 * (t->headers_out). Contiguous headers are coalesced into a single output
 * descriptor when the new header starts exactly where the previous output
 * descriptor ends (ho_ptr + ho_nbytes == hi_ptr). Invalid headers are
 * emitted from their backup buffer (ptr0).
 * NOTE(review): decimated chunk — several branch bodies are not visible.
 */
2108 __instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p __rte_unused,
2110 const struct instruction *ip,
2113 uint64_t valid_headers = t->valid_headers;
2114 uint32_t n_headers_out = t->n_headers_out;
2115 struct header_out_runtime *ho = NULL;
2116 uint8_t *ho_ptr = NULL;
2117 uint32_t ho_nbytes = 0, i;
2119 for (i = 0; i < n_emit; i++) {
2120 uint32_t header_id = ip->io.hdr.header_id[i];
2121 uint32_t struct_id = ip->io.hdr.struct_id[i];
2123 struct header_runtime *hi = &t->headers[header_id];
2124 uint8_t *hi_ptr0 = hi->ptr0;
2125 uint32_t n_bytes = hi->n_bytes;
2127 uint8_t *hi_ptr = t->structs[struct_id];
/* Invalid header: emit from the backup copy (ptr0). */
2129 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2130 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
2137 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
/* First output header: start a fresh output descriptor. */
2143 if (!n_headers_out) {
2144 ho = &t->headers_out[0];
2150 ho_nbytes = n_bytes;
/* Otherwise continue from the last output descriptor. */
2156 ho = &t->headers_out[n_headers_out - 1];
2159 ho_nbytes = ho->n_bytes;
/* Coalesce when the new header is contiguous with the previous one. */
2163 if (ho_ptr + ho_nbytes == hi_ptr) {
2164 ho_nbytes += n_bytes;
2166 ho->n_bytes = ho_nbytes;
2173 ho_nbytes = n_bytes;
2180 ho->n_bytes = ho_nbytes;
2181 t->n_headers_out = n_headers_out;
/* Emit a single header: thin wrapper over the many-header emitter. */
2185 __instr_hdr_emit_exec(
struct rte_swx_pipeline *p,
2187 const struct instruction *ip)
2189 __instr_hdr_emit_many_exec(p, t, ip, 1);
/*
 * Fused emit+tx wrappers: each variant emits N headers and then transmits
 * the packet, fusing N+1 logical instructions into one handler.
 * NOTE(review): decimated chunk — return types, 'struct thread *t'
 * parameters and braces fall outside the visible lines.
 */
/* Emit 1 header, then transmit. */
2193 __instr_hdr_emit_tx_exec(
struct rte_swx_pipeline *p,
2195 const struct instruction *ip)
2197 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2199 __instr_hdr_emit_many_exec(p, t, ip, 1);
2200 __instr_tx_exec(p, t, ip);
/* Emit 2 headers, then transmit. */
2204 __instr_hdr_emit2_tx_exec(
struct rte_swx_pipeline *p,
2206 const struct instruction *ip)
2208 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2210 __instr_hdr_emit_many_exec(p, t, ip, 2);
2211 __instr_tx_exec(p, t, ip);
/* Emit 3 headers, then transmit. */
2215 __instr_hdr_emit3_tx_exec(
struct rte_swx_pipeline *p,
2217 const struct instruction *ip)
2219 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2221 __instr_hdr_emit_many_exec(p, t, ip, 3);
2222 __instr_tx_exec(p, t, ip);
/* Emit 4 headers, then transmit. */
2226 __instr_hdr_emit4_tx_exec(
struct rte_swx_pipeline *p,
2228 const struct instruction *ip)
2230 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2232 __instr_hdr_emit_many_exec(p, t, ip, 4);
2233 __instr_tx_exec(p, t, ip);
/* Emit 5 headers, then transmit. */
2237 __instr_hdr_emit5_tx_exec(
struct rte_swx_pipeline *p,
2239 const struct instruction *ip)
2241 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2243 __instr_hdr_emit_many_exec(p, t, ip, 5);
2244 __instr_tx_exec(p, t, ip);
/* Emit 6 headers, then transmit. */
2248 __instr_hdr_emit6_tx_exec(
struct rte_swx_pipeline *p,
2250 const struct instruction *ip)
2252 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2254 __instr_hdr_emit_many_exec(p, t, ip, 6);
2255 __instr_tx_exec(p, t, ip);
/* Emit 7 headers, then transmit. */
2259 __instr_hdr_emit7_tx_exec(
struct rte_swx_pipeline *p,
2261 const struct instruction *ip)
2263 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2265 __instr_hdr_emit_many_exec(p, t, ip, 7);
2266 __instr_tx_exec(p, t, ip);
/* Emit 8 headers, then transmit. */
2270 __instr_hdr_emit8_tx_exec(
struct rte_swx_pipeline *p,
2272 const struct instruction *ip)
2274 TRACE(
"[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2276 __instr_hdr_emit_many_exec(p, t, ip, 8);
2277 __instr_tx_exec(p, t, ip);
/*
 * Validate a header: if not already valid, point its struct slot at the
 * backup buffer (ptr0) and set its bit in the valid-headers mask.
 * Already-valid headers are left untouched (early-out on the bit test).
 */
2284 __instr_hdr_validate_exec(
struct rte_swx_pipeline *p __rte_unused,
2286 const struct instruction *ip)
2288 uint32_t header_id = ip->valid.header_id;
2289 uint32_t struct_id = ip->valid.struct_id;
2290 uint64_t valid_headers = t->valid_headers;
2291 struct header_runtime *h = &t->headers[header_id];
2293 TRACE(
"[Thread %2u] validate header %u\n", p->thread_id, header_id);
/* Nothing to do when the header is already valid. */
2299 if (MASK64_BIT_GET(valid_headers, header_id))
2303 t->structs[struct_id] = h->ptr0;
2304 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/* Invalidate a header: clear its bit in the thread's valid-headers mask. */
2311 __instr_hdr_invalidate_exec(
struct rte_swx_pipeline *p __rte_unused,
2313 const struct instruction *ip)
2315 uint32_t header_id = ip->valid.header_id;
2317 TRACE(
"[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2320 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Learn instruction: add an entry to the current learner table using the
 * given action, action arguments taken from meta-data at mf_first_arg_offset,
 * and a timeout ID read from meta-data. Updates per-learner statistics with
 * the add status. NOTE(review): decimated chunk — the learner-table lookup
 * and the add call itself are only partially visible.
 */
2327 __instr_learn_exec(
struct rte_swx_pipeline *p,
2329 const struct instruction *ip)
2331 uint64_t action_id = ip->learn.action_id;
2332 uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
2333 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2334 ip->learn.mf_timeout_id_n_bits);
2335 uint32_t learner_id = t->learner_id;
2337 p->n_selectors + learner_id];
2338 struct learner_runtime *l = &t->learners[learner_id];
2339 struct learner_statistics *stats = &p->learner_stats[learner_id];
2347 &t->metadata[mf_first_arg_offset],
2350 TRACE(
"[Thread %2u] learner %u learn %s\n",
2353 status ?
"ok" :
"error");
/* Count the learn attempt, indexed by its success/failure status. */
2355 stats->n_pkts_learn[status] += 1;
/*
 * Rearm instruction: restart the timeout of the current learner entry with
 * its existing timeout. Updates the per-learner rearm counter.
 * NOTE(review): decimated chunk — the rearm call itself is not visible.
 */
2362 __instr_rearm_exec(
struct rte_swx_pipeline *p,
2364 const struct instruction *ip __rte_unused)
2366 uint32_t learner_id = t->learner_id;
2368 p->n_selectors + learner_id];
2369 struct learner_runtime *l = &t->learners[learner_id];
2370 struct learner_statistics *stats = &p->learner_stats[learner_id];
2375 TRACE(
"[Thread %2u] learner %u rearm\n",
2379 stats->n_pkts_rearm += 1;
/*
 * Rearm-new instruction: restart the timeout of the current learner entry
 * with a NEW timeout ID read from meta-data. Updates the rearm counter.
 * NOTE(review): decimated chunk — the rearm call itself is not visible.
 */
2383 __instr_rearm_new_exec(
struct rte_swx_pipeline *p,
2385 const struct instruction *ip)
2387 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2388 ip->learn.mf_timeout_id_n_bits);
2389 uint32_t learner_id = t->learner_id;
2391 p->n_selectors + learner_id];
2392 struct learner_runtime *l = &t->learners[learner_id];
2393 struct learner_statistics *stats = &p->learner_stats[learner_id];
2398 TRACE(
"[Thread %2u] learner %u rearm with timeout ID %u\n",
2403 stats->n_pkts_rearm += 1;
/*
 * Forget instruction: delete the current entry from the learner table and
 * bump the per-learner forget counter.
 * NOTE(review): decimated chunk — the delete call itself is not visible.
 */
2410 __instr_forget_exec(
struct rte_swx_pipeline *p,
2412 const struct instruction *ip __rte_unused)
2414 uint32_t learner_id = t->learner_id;
2416 p->n_selectors + learner_id];
2417 struct learner_runtime *l = &t->learners[learner_id];
2418 struct learner_statistics *stats = &p->learner_stats[learner_id];
2423 TRACE(
"[Thread %2u] learner %u forget\n",
2427 stats->n_pkts_forget += 1;
/* Entry-ID instruction: write the current table entry ID into the
 * meta-data destination field described by ip->mov.dst.
 */
2434 __instr_entryid_exec(
struct rte_swx_pipeline *p __rte_unused,
2436 const struct instruction *ip)
2438 TRACE(
"[Thread %2u]: entryid\n",
2442 METADATA_WRITE(t, ip->mov.dst.offset, ip->mov.dst.n_bits, t->entry_id);
/*
 * Invoke an extern-object member function, passing the object handle and its
 * mailbox. Returns the function's completion status ('done' flag).
 * NOTE(review): decimated chunk — the 'func' lookup line is not visible.
 */
2448 static inline uint32_t
2449 __instr_extern_obj_exec(
struct rte_swx_pipeline *p __rte_unused,
2451 const struct instruction *ip)
2453 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2454 uint32_t func_id = ip->ext_obj.func_id;
2455 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2459 TRACE(
"[Thread %2u] extern obj %u member func %u\n",
2464 done = func(obj->obj, obj->mailbox);
/*
 * Invoke an extern function, passing its mailbox. Returns the function's
 * completion status ('done' flag).
 * NOTE(review): decimated chunk — the 'func' lookup line is not visible.
 */
2469 static inline uint32_t
2470 __instr_extern_func_exec(
struct rte_swx_pipeline *p __rte_unused,
2472 const struct instruction *ip)
2474 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2475 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2479 TRACE(
"[Thread %2u] extern func %u\n",
2483 done = func(ext_func->mailbox);
/*
 * Hash-function instruction: run the configured hash function over
 * n_src_bytes of the source struct (seed 0) and write the result into the
 * meta-data destination field.
 */
2492 __instr_hash_func_exec(
struct rte_swx_pipeline *p,
2494 const struct instruction *ip)
2496 uint32_t hash_func_id = ip->hash_func.hash_func_id;
2497 uint32_t dst_offset = ip->hash_func.dst.offset;
2498 uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
2499 uint32_t src_struct_id = ip->hash_func.src.struct_id;
2500 uint32_t src_offset = ip->hash_func.src.offset;
2501 uint32_t n_src_bytes = ip->hash_func.src.n_bytes;
2503 struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
2504 uint8_t *src_ptr = t->structs[src_struct_id];
2507 TRACE(
"[Thread %2u] hash %u\n",
2511 result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
2512 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/**
 * Software RSS (Toeplitz-style) hash.
 *
 * The key and the input are treated as arrays of 32-bit words (sizes are
 * given in bytes and truncated to whole words). For every set bit of every
 * input word, a 32-bit sliding window of the key — starting at that bit
 * position and spanning into the next key word — is XOR-ed into the result.
 *
 * @param rss_key          Hash key (at least 8 bytes; wraps modulo its size).
 * @param rss_key_size     Key size in bytes.
 * @param input_data       Input buffer to be hashed.
 * @param input_data_size  Input size in bytes.
 * @return 32-bit hash value.
 *
 * Fixes vs. previous revision: 'pos' is now explicitly computed (it was
 * read uninitialized), and the key window no longer performs a
 * shift-by-32 (undefined behavior) when the highest bit (pos == 31) is set —
 * that window simply has no contribution from the next key word.
 */
static inline uint32_t
rss_func(void *rss_key, uint32_t rss_key_size,
	 void *input_data, uint32_t input_data_size)
{
	uint32_t *key = (uint32_t *)rss_key;
	uint32_t *data = (uint32_t *)input_data;
	uint32_t key_size = rss_key_size >> 2;   /* Bytes -> 32-bit words. */
	uint32_t data_size = input_data_size >> 2;
	uint32_t hash_val = 0, i;

	for (i = 0; i < data_size; i++) {
		uint32_t d;

		/* Visit each set bit of the current input word; clearing the
		 * lowest set bit (d &= d - 1) each iteration.
		 */
		for (d = data[i]; d; d &= (d - 1)) {
			uint32_t key0, key1, pos, x;

			/* Position of the lowest set bit of d. */
			for (pos = 0, x = d; !(x & 1); x >>= 1)
				pos++;

			/* 32-bit key window starting at bit (31 - pos) of
			 * word i, continuing into word i + 1. Guard the
			 * pos == 31 case: shifting by 32 is undefined, and
			 * the next word contributes nothing there.
			 */
			key0 = key[i % key_size] << (31 - pos);
			key1 = (pos == 31) ? 0 :
				key[(i + 1) % key_size] >> (pos + 1);
			hash_val ^= key0 | key1;
		}
	}

	return hash_val;
}
/*
 * RSS instruction: hash n_src_bytes of the source struct with the RSS
 * object's key via rss_func() and write the result into the meta-data
 * destination field.
 */
2544 __instr_rss_exec(
struct rte_swx_pipeline *p,
2546 const struct instruction *ip)
2548 uint32_t rss_obj_id = ip->rss.rss_obj_id;
2549 uint32_t dst_offset = ip->rss.dst.offset;
2550 uint32_t n_dst_bits = ip->rss.dst.n_bits;
2551 uint32_t src_struct_id = ip->rss.src.struct_id;
2552 uint32_t src_offset = ip->rss.src.offset;
2553 uint32_t n_src_bytes = ip->rss.src.n_bytes;
2555 struct rss_runtime *r = p->rss_runtime[rss_obj_id];
2556 uint8_t *src_ptr = t->structs[src_struct_id];
2559 TRACE(
"[Thread %2u] rss %u\n",
2563 result = rss_func(r->key, r->key_size, &src_ptr[src_offset], n_src_bytes);
2564 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * mov instruction variants. Suffix convention used throughout this file:
 * m = meta-data (host byte order), h = header field (network byte order);
 * e.g. "mh" = meta-data destination from header source.
 * NOTE(review): decimated chunk — the MOV() macro invocations that do the
 * actual copy fall outside the visible lines.
 */
2571 __instr_mov_exec(
struct rte_swx_pipeline *p __rte_unused,
2573 const struct instruction *ip)
2575 TRACE(
"[Thread %2u] mov\n", p->thread_id);
/* mov, meta-data <- header. */
2581 __instr_mov_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2583 const struct instruction *ip)
2585 TRACE(
"[Thread %2u] mov (mh)\n", p->thread_id);
/* mov, header <- meta-data. */
2591 __instr_mov_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2593 const struct instruction *ip)
2595 TRACE(
"[Thread %2u] mov (hm)\n", p->thread_id);
/* mov, header <- header. */
2601 __instr_mov_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2603 const struct instruction *ip)
2605 TRACE(
"[Thread %2u] mov (hh)\n", p->thread_id);
/*
 * mov (dma): byte-array copy between two struct fields. When the destination
 * is wider than the source, the extra leading destination bytes are zeroed;
 * when the source is wider, its leading extra bytes are skipped (i.e. the
 * copy is right-aligned / big-endian style).
 * NOTE(review): the TRACE references 'n', whose declaration is outside the
 * visible lines of this decimated chunk — presumably min(n_dst, n_src);
 * verify against the full file.
 */
2611 __instr_mov_dma_exec(
struct rte_swx_pipeline *p __rte_unused,
2613 const struct instruction *ip)
2615 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2616 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2618 uint32_t n_dst = ip->mov.dst.n_bits >> 3;
2619 uint32_t n_src = ip->mov.src.n_bits >> 3;
2621 TRACE(
"[Thread %2u] mov (dma) %u bytes\n", p->thread_id, n);
/* Wider destination: zero-fill the leading bytes, then copy. */
2624 if (n_dst > n_src) {
2625 uint32_t n_dst_zero = n_dst - n_src;
2628 memset(dst, 0, n_dst_zero);
2632 memcpy(dst, src, n_src);
/* Wider source: skip its leading bytes, keep the trailing n_dst. */
2634 uint32_t n_src_skipped = n_src - n_dst;
2637 src += n_src_skipped;
2638 memcpy(dst, src, n_dst);
/*
 * mov (128): copy a 128-bit field as two 64-bit loads/stores.
 * The struct buffers are accessed through uint64_t pointers — presumably
 * 8-byte aligned by construction; verify against the struct layout code.
 */
2643 __instr_mov_128_exec(
struct rte_swx_pipeline *p __rte_unused,
2645 const struct instruction *ip)
2647 uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2648 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2650 uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2651 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2653 TRACE(
"[Thread %2u] mov (128)\n", p->thread_id);
2655 dst64_ptr[0] = src64_ptr[0];
2656 dst64_ptr[1] = src64_ptr[1];
/*
 * mov (128 <- 32): place a 32-bit source value into a 128-bit destination.
 * Only the write of the last 32-bit word is visible here; the zeroing of the
 * other three words falls outside this decimated view.
 */
2660 __instr_mov_128_32_exec(
struct rte_swx_pipeline *p __rte_unused,
2662 const struct instruction *ip)
2664 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2665 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2667 uint32_t *dst32 = (uint32_t *)dst;
2668 uint32_t *src32 = (uint32_t *)src;
2670 TRACE(
"[Thread %2u] mov (128 <- 32)\n", p->thread_id);
2675 dst32[3] = src32[0];
/* mov (i): move an immediate value (ip->mov.src_val) into the destination
 * field. The MOV_I() invocation is outside this decimated view.
 */
2679 __instr_mov_i_exec(
struct rte_swx_pipeline *p __rte_unused,
2681 const struct instruction *ip)
2683 TRACE(
"[Thread %2u] mov m.f %" PRIx64
"\n", p->thread_id, ip->mov.src_val);
/*
 * DMA n_dma action-data blobs into headers: for each entry, copy n_bytes
 * from the action-data struct (t->structs[0]) into the header buffer. An
 * invalid header is written through its backup buffer (ptr0) and then marked
 * valid. The accumulated valid-headers mask is written back once at the end.
 */
2692 __instr_dma_ht_many_exec(
struct rte_swx_pipeline *p __rte_unused,
2694 const struct instruction *ip,
2697 uint8_t *action_data = t->structs[0];
2698 uint64_t valid_headers = t->valid_headers;
2701 for (i = 0; i < n_dma; i++) {
2702 uint32_t header_id = ip->dma.dst.header_id[i];
2703 uint32_t struct_id = ip->dma.dst.struct_id[i];
2704 uint32_t offset = ip->dma.src.offset[i];
2705 uint32_t n_bytes = ip->dma.n_bytes[i];
2707 struct header_runtime *h = &t->headers[header_id];
2708 uint8_t *h_ptr0 = h->ptr0;
2709 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write into the backup buffer. */
2711 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2713 void *src = &action_data[offset];
2715 TRACE(
"[Thread %2u] dma h.s t.f\n", p->thread_id);
2718 memcpy(dst, src, n_bytes);
2719 t->structs[struct_id] = dst;
2720 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2723 t->valid_headers = valid_headers;
/* DMA a single action-data blob into a header (wrapper over the many-DMA
 * handler).
 */
2727 __instr_dma_ht_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2729 __instr_dma_ht_many_exec(p, t, ip, 1);
/*
 * Fused DMA wrappers: each variant performs N action-data-to-header DMAs
 * in one handler, fusing N logical instructions.
 */
/* 2 fused DMAs. */
2733 __instr_dma_ht2_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2735 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2737 __instr_dma_ht_many_exec(p, t, ip, 2);
/* 3 fused DMAs. */
2741 __instr_dma_ht3_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2743 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2745 __instr_dma_ht_many_exec(p, t, ip, 3);
/* 4 fused DMAs. */
2749 __instr_dma_ht4_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2751 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2753 __instr_dma_ht_many_exec(p, t, ip, 4);
/* 5 fused DMAs. */
2757 __instr_dma_ht5_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2759 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2761 __instr_dma_ht_many_exec(p, t, ip, 5);
/* 6 fused DMAs. */
2765 __instr_dma_ht6_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2767 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2769 __instr_dma_ht_many_exec(p, t, ip, 6);
/* 7 fused DMAs. */
2773 __instr_dma_ht7_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2775 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2777 __instr_dma_ht_many_exec(p, t, ip, 7);
/* 8 fused DMAs. */
2781 __instr_dma_ht8_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2783 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2785 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU add variants. Operand suffixes: m = meta-data, h = header field
 * (network byte order), i = immediate. The ALU() macro invocations that do
 * the arithmetic fall outside this decimated view.
 */
2792 __instr_alu_add_exec(
struct rte_swx_pipeline *p __rte_unused,
2794 const struct instruction *ip)
2796 TRACE(
"[Thread %2u] add\n", p->thread_id);
/* add, meta-data dst, header src. */
2802 __instr_alu_add_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2804 const struct instruction *ip)
2806 TRACE(
"[Thread %2u] add (mh)\n", p->thread_id);
/* add, header dst, meta-data src. */
2812 __instr_alu_add_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2814 const struct instruction *ip)
2816 TRACE(
"[Thread %2u] add (hm)\n", p->thread_id);
/* add, header dst, header src. */
2822 __instr_alu_add_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2824 const struct instruction *ip)
2826 TRACE(
"[Thread %2u] add (hh)\n", p->thread_id);
/* add, meta-data dst, immediate src. */
2832 __instr_alu_add_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
2834 const struct instruction *ip)
2836 TRACE(
"[Thread %2u] add (mi)\n", p->thread_id);
/* add, header dst, immediate src. */
2842 __instr_alu_add_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
2844 const struct instruction *ip)
2846 TRACE(
"[Thread %2u] add (hi)\n", p->thread_id);
/* ALU subtract variants (same operand-suffix convention as add above). */
2852 __instr_alu_sub_exec(
struct rte_swx_pipeline *p __rte_unused,
2854 const struct instruction *ip)
2856 TRACE(
"[Thread %2u] sub\n", p->thread_id);
/* sub (mh). */
2862 __instr_alu_sub_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2864 const struct instruction *ip)
2866 TRACE(
"[Thread %2u] sub (mh)\n", p->thread_id);
/* sub (hm). */
2872 __instr_alu_sub_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2874 const struct instruction *ip)
2876 TRACE(
"[Thread %2u] sub (hm)\n", p->thread_id);
/* sub (hh). */
2882 __instr_alu_sub_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2884 const struct instruction *ip)
2886 TRACE(
"[Thread %2u] sub (hh)\n", p->thread_id);
/* sub (mi). */
2892 __instr_alu_sub_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
2894 const struct instruction *ip)
2896 TRACE(
"[Thread %2u] sub (mi)\n", p->thread_id);
/* sub (hi). */
2902 __instr_alu_sub_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
2904 const struct instruction *ip)
2906 TRACE(
"[Thread %2u] sub (hi)\n", p->thread_id);
/* ALU shift-left variants (same operand-suffix convention as add above). */
2912 __instr_alu_shl_exec(
struct rte_swx_pipeline *p __rte_unused,
2914 const struct instruction *ip)
2916 TRACE(
"[Thread %2u] shl\n", p->thread_id);
/* shl (mh). */
2922 __instr_alu_shl_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2924 const struct instruction *ip)
2926 TRACE(
"[Thread %2u] shl (mh)\n", p->thread_id);
/* shl (hm). */
2932 __instr_alu_shl_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2934 const struct instruction *ip)
2936 TRACE(
"[Thread %2u] shl (hm)\n", p->thread_id);
/* shl (hh). */
2942 __instr_alu_shl_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2944 const struct instruction *ip)
2946 TRACE(
"[Thread %2u] shl (hh)\n", p->thread_id);
/* shl (mi). */
2952 __instr_alu_shl_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
2954 const struct instruction *ip)
2956 TRACE(
"[Thread %2u] shl (mi)\n", p->thread_id);
/* shl (hi). */
2962 __instr_alu_shl_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
2964 const struct instruction *ip)
2966 TRACE(
"[Thread %2u] shl (hi)\n", p->thread_id);
/* ALU shift-right variants (same operand-suffix convention as add above). */
2972 __instr_alu_shr_exec(
struct rte_swx_pipeline *p __rte_unused,
2974 const struct instruction *ip)
2976 TRACE(
"[Thread %2u] shr\n", p->thread_id);
/* shr (mh). */
2982 __instr_alu_shr_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2984 const struct instruction *ip)
2986 TRACE(
"[Thread %2u] shr (mh)\n", p->thread_id);
/* shr (hm). */
2992 __instr_alu_shr_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2994 const struct instruction *ip)
2996 TRACE(
"[Thread %2u] shr (hm)\n", p->thread_id);
/* shr (hh). */
3002 __instr_alu_shr_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3004 const struct instruction *ip)
3006 TRACE(
"[Thread %2u] shr (hh)\n", p->thread_id);
/* shr (mi). */
3012 __instr_alu_shr_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
3014 const struct instruction *ip)
3016 TRACE(
"[Thread %2u] shr (mi)\n", p->thread_id);
/* shr (hi). */
3023 __instr_alu_shr_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
3025 const struct instruction *ip)
3027 TRACE(
"[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * ALU bitwise-AND variants. The hm/hh cases use the *_FAST macros (no
 * byte-order conversion needed for bitwise operations).
 */
3033 __instr_alu_and_exec(
struct rte_swx_pipeline *p __rte_unused,
3035 const struct instruction *ip)
3037 TRACE(
"[Thread %2u] and\n", p->thread_id);
/* and (mh). */
3043 __instr_alu_and_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3045 const struct instruction *ip)
3047 TRACE(
"[Thread %2u] and (mh)\n", p->thread_id);
/* and (hm), fast path. */
3053 __instr_alu_and_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3055 const struct instruction *ip)
3057 TRACE(
"[Thread %2u] and (hm)\n", p->thread_id);
3059 ALU_HM_FAST(t, ip, &);
/* and (hh), fast path. */
3063 __instr_alu_and_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3065 const struct instruction *ip)
3067 TRACE(
"[Thread %2u] and (hh)\n", p->thread_id);
3069 ALU_HH_FAST(t, ip, &);
/* and (i): immediate source. */
3073 __instr_alu_and_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3075 const struct instruction *ip)
3077 TRACE(
"[Thread %2u] and (i)\n", p->thread_id);
/* ALU bitwise-OR variants (same structure as the AND group above). */
3083 __instr_alu_or_exec(
struct rte_swx_pipeline *p __rte_unused,
3085 const struct instruction *ip)
3087 TRACE(
"[Thread %2u] or\n", p->thread_id);
/* or (mh). */
3093 __instr_alu_or_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3095 const struct instruction *ip)
3097 TRACE(
"[Thread %2u] or (mh)\n", p->thread_id);
/* or (hm), fast path. */
3103 __instr_alu_or_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3105 const struct instruction *ip)
3107 TRACE(
"[Thread %2u] or (hm)\n", p->thread_id);
3109 ALU_HM_FAST(t, ip, |);
/* or (hh), fast path. */
3113 __instr_alu_or_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3115 const struct instruction *ip)
3117 TRACE(
"[Thread %2u] or (hh)\n", p->thread_id);
3119 ALU_HH_FAST(t, ip, |);
/* or (i): immediate source. */
3123 __instr_alu_or_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3125 const struct instruction *ip)
3127 TRACE(
"[Thread %2u] or (i)\n", p->thread_id);
/* ALU bitwise-XOR variants (same structure as the AND group above). */
3133 __instr_alu_xor_exec(
struct rte_swx_pipeline *p __rte_unused,
3135 const struct instruction *ip)
3137 TRACE(
"[Thread %2u] xor\n", p->thread_id);
/* xor (mh). */
3143 __instr_alu_xor_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3145 const struct instruction *ip)
3147 TRACE(
"[Thread %2u] xor (mh)\n", p->thread_id);
/* xor (hm), fast path. */
3153 __instr_alu_xor_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3155 const struct instruction *ip)
3157 TRACE(
"[Thread %2u] xor (hm)\n", p->thread_id);
3159 ALU_HM_FAST(t, ip, ^);
/* xor (hh), fast path. */
3163 __instr_alu_xor_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3165 const struct instruction *ip)
3167 TRACE(
"[Thread %2u] xor (hh)\n", p->thread_id);
3169 ALU_HH_FAST(t, ip, ^);
/* xor (i): immediate source. */
3173 __instr_alu_xor_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3175 const struct instruction *ip)
3177 TRACE(
"[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Incremental (RFC 1624 style) checksum ADD of a single field: fold the
 * masked source value into the 16-bit one's-complement checksum held in the
 * destination. The repeated "(r & 0xFFFF) + (r >> 16)" steps fold carries
 * back into the low 16 bits.
 * NOTE(review): decimated chunk — the initial load of 'r' from the current
 * checksum and the src64 load are outside the visible lines.
 */
3183 __instr_alu_ckadd_field_exec(
struct rte_swx_pipeline *p __rte_unused,
3185 const struct instruction *ip)
3187 uint8_t *dst_struct, *src_struct;
3188 uint16_t *dst16_ptr, dst;
3189 uint64_t *src64_ptr, src64, src64_mask, src;
3192 TRACE(
"[Thread %2u] ckadd (field)\n", p->thread_id);
3195 dst_struct = t->structs[ip->alu.dst.struct_id];
3196 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3199 src_struct = t->structs[ip->alu.src.struct_id];
3200 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Mask the source down to its declared bit width. */
3202 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3203 src = src64 & src64_mask;
/* Add the source, folded to 32 bits. */
3213 r += (src >> 32) + (src & 0xFFFFFFFF);
/* Fold carries into the low 16 bits (repeated until stable). */
3219 r = (r & 0xFFFF) + (r >> 16);
3224 r = (r & 0xFFFF) + (r >> 16);
3231 r = (r & 0xFFFF) + (r >> 16);
3237 *dst16_ptr = (uint16_t)r;
/*
 * Incremental checksum SUBTRACT of a single field (RFC 1624): remove the
 * masked source value from the 16-bit one's-complement checksum. The large
 * bias constant (0xFFFF00000) keeps the intermediate value non-negative
 * before the subtraction, so the carry-folding below stays correct.
 * NOTE(review): decimated chunk — the initial 'r' load and the src64 load
 * are outside the visible lines.
 */
3241 __instr_alu_cksub_field_exec(
struct rte_swx_pipeline *p __rte_unused,
3243 const struct instruction *ip)
3245 uint8_t *dst_struct, *src_struct;
3246 uint16_t *dst16_ptr, dst;
3247 uint64_t *src64_ptr, src64, src64_mask, src;
3250 TRACE(
"[Thread %2u] cksub (field)\n", p->thread_id);
3253 dst_struct = t->structs[ip->alu.dst.struct_id];
3254 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3257 src_struct = t->structs[ip->alu.src.struct_id];
3258 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3260 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3261 src = src64 & src64_mask;
/* Bias to avoid underflow before the subtraction. */
3279 r += 0xFFFF00000ULL;
3284 r -= (src >> 32) + (src & 0xFFFFFFFF);
/* Fold carries into the low 16 bits. */
3289 r = (r & 0xFFFF) + (r >> 16);
3294 r = (r & 0xFFFF) + (r >> 16);
3301 r = (r & 0xFFFF) + (r >> 16);
3307 *dst16_ptr = (uint16_t)r;
/*
 * One's-complement checksum over a fixed 20-byte struct (IPv4 header without
 * options): sum five 32-bit words, fold carries, and store the 16-bit
 * result. The final "r0 ? r0 : 0xFFFF" maps a zero sum to 0xFFFF.
 * NOTE(review): decimated chunk — the initial word sums into r0/r1 and the
 * complement step are outside the visible lines.
 */
3311 __instr_alu_ckadd_struct20_exec(
struct rte_swx_pipeline *p __rte_unused,
3313 const struct instruction *ip)
3315 uint8_t *dst_struct, *src_struct;
3316 uint16_t *dst16_ptr, dst;
3317 uint32_t *src32_ptr;
3320 TRACE(
"[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3323 dst_struct = t->structs[ip->alu.dst.struct_id];
3324 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3327 src_struct = t->structs[ip->alu.src.struct_id];
3328 src32_ptr = (uint32_t *)&src_struct[0];
3338 r0 += r1 + src32_ptr[4];
/* Fold carries into the low 16 bits. */
3343 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3348 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3355 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A zero checksum is transmitted as 0xFFFF. */
3359 r0 = r0 ? r0 : 0xFFFF;
3361 *dst16_ptr = (uint16_t)r0;
/*
 * One's-complement checksum over a whole header struct of arbitrary
 * word-multiple size. Note that ip->alu.src.n_bits is overloaded here to
 * carry the source HEADER ID, whose run-time size selects the loop bound.
 * The common 20-byte (IPv4) case is dispatched to the specialized handler.
 */
3365 __instr_alu_ckadd_struct_exec(
struct rte_swx_pipeline *p __rte_unused,
3367 const struct instruction *ip)
3369 uint32_t src_header_id = ip->alu.src.n_bits;
3370 uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3371 uint8_t *dst_struct, *src_struct;
3372 uint16_t *dst16_ptr, dst;
3373 uint32_t *src32_ptr;
/* Fast path: 20-byte header (IPv4 without options). */
3377 if (n_src_header_bytes == 20) {
3378 __instr_alu_ckadd_struct20_exec(p, t, ip);
3382 TRACE(
"[Thread %2u] ckadd (struct)\n", p->thread_id);
3385 dst_struct = t->structs[ip->alu.dst.struct_id];
3386 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3389 src_struct = t->structs[ip->alu.src.struct_id];
3390 src32_ptr = (uint32_t *)&src_struct[0];
/* Sum the header as 32-bit words. */
3400 for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
/* Fold carries into the low 16 bits. */
3406 r = (r & 0xFFFF) + (r >> 16);
3411 r = (r & 0xFFFF) + (r >> 16);
3418 r = (r & 0xFFFF) + (r >> 16);
3424 *dst16_ptr = (uint16_t)r;
/* Resolve the register array targeted by the instruction and return its
 * element buffer (the return statement is outside this decimated view).
 */
3430 static inline uint64_t *
3431 instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3433 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
/*
 * Read the register-array index from a host-byte-order struct field: load
 * 64 bits, mask down to the field's declared width, then clamp with the
 * array's size mask (power-of-two sized arrays).
 */
3437 static inline uint64_t
3438 instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3440 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3442 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3443 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3444 uint64_t idx64 = *idx64_ptr;
3445 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3446 uint64_t idx = idx64 & idx64_mask & r->size_mask;
/*
 * Little-endian hosts only: read the index from a network-byte-order
 * (header) field — byte-swap the 64-bit load, then right-align it to the
 * field width before applying the array size mask. On big-endian hosts this
 * is #define'd to the HBO variant instead.
 */
3451 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3453 static inline uint64_t
3454 instr_regarray_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3456 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3458 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3459 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3460 uint64_t idx64 = *idx64_ptr;
3461 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
/* Immediate register-array index, clamped with the array's size mask.
 * (The preceding #define is the big-endian alias for the NBO index reader.)
 */
3468 #define instr_regarray_idx_nbo instr_regarray_idx_hbo 3472 static inline uint64_t
3473 instr_regarray_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3475 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3477 uint64_t idx = ip->regarray.idx_val & r->size_mask;
/* Read the source operand from a host-byte-order struct field: 64-bit load
 * masked down to the field's declared width.
 */
3482 static inline uint64_t
3483 instr_regarray_src_hbo(
struct thread *t,
const struct instruction *ip)
3485 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3486 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3487 uint64_t src64 = *src64_ptr;
3488 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3489 uint64_t src = src64 & src64_mask;
/* Little-endian hosts only: read the source from a network-byte-order
 * (header) field — byte-swap, then right-align to the field width. On
 * big-endian hosts this is #define'd to the HBO variant.
 */
3494 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3496 static inline uint64_t
3497 instr_regarray_src_nbo(
struct thread *t,
const struct instruction *ip)
3499 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3500 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3501 uint64_t src64 = *src64_ptr;
3502 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
/* Write 'src' into a host-byte-order destination field: read-modify-write
 * the containing 64-bit word, replacing only the field's masked bits.
 * (The preceding #define is the big-endian alias for the NBO source reader.)
 */
3509 #define instr_regarray_src_nbo instr_regarray_src_hbo 3514 instr_regarray_dst_hbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3516 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3517 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3518 uint64_t dst64 = *dst64_ptr;
3519 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3521 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/* Little-endian hosts only: write a host-byte-order value into a
 * network-byte-order destination field — byte-swap the source and
 * right-align it to the field width before the masked read-modify-write.
 */
3525 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3528 instr_regarray_dst_nbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3530 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3531 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3532 uint64_t dst64 = *dst64_ptr;
3533 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3535 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3536 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/*
 * regprefetch variants: resolve the register array and index (h = index from
 * header/NBO field, m = index from meta-data/HBO field, i = immediate index)
 * and prefetch the addressed element. The prefetch intrinsic call itself is
 * outside this decimated view. (The leading #define is the big-endian alias
 * for the NBO destination setter.)
 */
3541 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set 3546 __instr_regprefetch_rh_exec(
struct rte_swx_pipeline *p,
3548 const struct instruction *ip)
3550 uint64_t *regarray, idx;
3552 TRACE(
"[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3554 regarray = instr_regarray_regarray(p, ip);
3555 idx = instr_regarray_idx_nbo(p, t, ip);
/* Prefetch with meta-data (HBO) index. */
3560 __instr_regprefetch_rm_exec(
struct rte_swx_pipeline *p,
3562 const struct instruction *ip)
3564 uint64_t *regarray, idx;
3566 TRACE(
"[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3568 regarray = instr_regarray_regarray(p, ip);
3569 idx = instr_regarray_idx_hbo(p, t, ip);
/* Prefetch with immediate index. */
3574 __instr_regprefetch_ri_exec(
struct rte_swx_pipeline *p,
3575 struct thread *t __rte_unused,
3576 const struct instruction *ip)
3578 uint64_t *regarray, idx;
3580 TRACE(
"[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3582 regarray = instr_regarray_regarray(p, ip);
3583 idx = instr_regarray_idx_imm(p, ip);
/*
 * regrd variants: read regarray[idx] and store it into the destination
 * field. Naming: first letter = destination kind (h = header/NBO,
 * m = meta-data/HBO), then 'r', then the index source (h/m/i as above).
 */
3588 __instr_regrd_hrh_exec(
struct rte_swx_pipeline *p,
3590 const struct instruction *ip)
3592 uint64_t *regarray, idx;
3594 TRACE(
"[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3596 regarray = instr_regarray_regarray(p, ip);
3597 idx = instr_regarray_idx_nbo(p, t, ip);
3598 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* h = r[m]. */
3602 __instr_regrd_hrm_exec(
struct rte_swx_pipeline *p,
3604 const struct instruction *ip)
3606 uint64_t *regarray, idx;
3608 TRACE(
"[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3611 regarray = instr_regarray_regarray(p, ip);
3612 idx = instr_regarray_idx_hbo(p, t, ip);
3613 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[h]. */
3617 __instr_regrd_mrh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3619 uint64_t *regarray, idx;
3621 TRACE(
"[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3623 regarray = instr_regarray_regarray(p, ip);
3624 idx = instr_regarray_idx_nbo(p, t, ip);
3625 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[m]. */
3629 __instr_regrd_mrm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3631 uint64_t *regarray, idx;
3633 TRACE(
"[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3635 regarray = instr_regarray_regarray(p, ip);
3636 idx = instr_regarray_idx_hbo(p, t, ip);
3637 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* h = r[i]. */
3641 __instr_regrd_hri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3643 uint64_t *regarray, idx;
3645 TRACE(
"[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3647 regarray = instr_regarray_regarray(p, ip);
3648 idx = instr_regarray_idx_imm(p, ip);
3649 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[i]. */
3653 __instr_regrd_mri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3655 uint64_t *regarray, idx;
3657 TRACE(
"[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3659 regarray = instr_regarray_regarray(p, ip);
3660 idx = instr_regarray_idx_imm(p, ip);
3661 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/*
 * regwr variants: write a source value into regarray[idx]. Naming:
 * "r" + index source + value source, where h = header/NBO field,
 * m = meta-data/HBO field, i = immediate.
 */
3665 __instr_regwr_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3667 uint64_t *regarray, idx, src;
3669 TRACE(
"[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3671 regarray = instr_regarray_regarray(p, ip);
3672 idx = instr_regarray_idx_nbo(p, t, ip);
3673 src = instr_regarray_src_nbo(t, ip);
3674 regarray[idx] = src;
/* r[h] = m. */
3678 __instr_regwr_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3680 uint64_t *regarray, idx, src;
3682 TRACE(
"[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3684 regarray = instr_regarray_regarray(p, ip);
3685 idx = instr_regarray_idx_nbo(p, t, ip);
3686 src = instr_regarray_src_hbo(t, ip);
3687 regarray[idx] = src;
/* r[m] = h. */
3691 __instr_regwr_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3693 uint64_t *regarray, idx, src;
3695 TRACE(
"[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3697 regarray = instr_regarray_regarray(p, ip);
3698 idx = instr_regarray_idx_hbo(p, t, ip);
3699 src = instr_regarray_src_nbo(t, ip);
3700 regarray[idx] = src;
/* r[m] = m. */
3704 __instr_regwr_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3706 uint64_t *regarray, idx, src;
3708 TRACE(
"[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3710 regarray = instr_regarray_regarray(p, ip);
3711 idx = instr_regarray_idx_hbo(p, t, ip);
3712 src = instr_regarray_src_hbo(t, ip);
3713 regarray[idx] = src;
/* r[h] = i. */
3717 __instr_regwr_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3719 uint64_t *regarray, idx, src;
3721 TRACE(
"[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3723 regarray = instr_regarray_regarray(p, ip);
3724 idx = instr_regarray_idx_nbo(p, t, ip);
3725 src = ip->regarray.dstsrc_val;
3726 regarray[idx] = src;
/* r[m] = i. */
3730 __instr_regwr_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3732 uint64_t *regarray, idx, src;
3734 TRACE(
"[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3736 regarray = instr_regarray_regarray(p, ip);
3737 idx = instr_regarray_idx_hbo(p, t, ip);
3738 src = ip->regarray.dstsrc_val;
3739 regarray[idx] = src;
/* r[i] = h. */
3743 __instr_regwr_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3745 uint64_t *regarray, idx, src;
3747 TRACE(
"[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3749 regarray = instr_regarray_regarray(p, ip);
3750 idx = instr_regarray_idx_imm(p, ip);
3751 src = instr_regarray_src_nbo(t, ip);
3752 regarray[idx] = src;
/* r[i] = m. */
3756 __instr_regwr_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3758 uint64_t *regarray, idx, src;
3760 TRACE(
"[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3762 regarray = instr_regarray_regarray(p, ip);
3763 idx = instr_regarray_idx_imm(p, ip);
3764 src = instr_regarray_src_hbo(t, ip);
3765 regarray[idx] = src;
/* r[i] = i. */
3769 __instr_regwr_rii_exec(
struct rte_swx_pipeline *p,
3770 struct thread *t __rte_unused,
3771 const struct instruction *ip)
3773 uint64_t *regarray, idx, src;
3775 TRACE(
"[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3777 regarray = instr_regarray_regarray(p, ip);
3778 idx = instr_regarray_idx_imm(p, ip);
3779 src = ip->regarray.dstsrc_val;
3780 regarray[idx] = src;
3784 __instr_regadd_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3786 uint64_t *regarray, idx, src;
3788 TRACE(
"[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3790 regarray = instr_regarray_regarray(p, ip);
3791 idx = instr_regarray_idx_nbo(p, t, ip);
3792 src = instr_regarray_src_nbo(t, ip);
3793 regarray[idx] += src;
3797 __instr_regadd_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3799 uint64_t *regarray, idx, src;
3801 TRACE(
"[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3803 regarray = instr_regarray_regarray(p, ip);
3804 idx = instr_regarray_idx_nbo(p, t, ip);
3805 src = instr_regarray_src_hbo(t, ip);
3806 regarray[idx] += src;
3810 __instr_regadd_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3812 uint64_t *regarray, idx, src;
3814 TRACE(
"[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3816 regarray = instr_regarray_regarray(p, ip);
3817 idx = instr_regarray_idx_hbo(p, t, ip);
3818 src = instr_regarray_src_nbo(t, ip);
3819 regarray[idx] += src;
3823 __instr_regadd_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3825 uint64_t *regarray, idx, src;
3827 TRACE(
"[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3829 regarray = instr_regarray_regarray(p, ip);
3830 idx = instr_regarray_idx_hbo(p, t, ip);
3831 src = instr_regarray_src_hbo(t, ip);
3832 regarray[idx] += src;
3836 __instr_regadd_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3838 uint64_t *regarray, idx, src;
3840 TRACE(
"[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3842 regarray = instr_regarray_regarray(p, ip);
3843 idx = instr_regarray_idx_nbo(p, t, ip);
3844 src = ip->regarray.dstsrc_val;
3845 regarray[idx] += src;
3849 __instr_regadd_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3851 uint64_t *regarray, idx, src;
3853 TRACE(
"[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3855 regarray = instr_regarray_regarray(p, ip);
3856 idx = instr_regarray_idx_hbo(p, t, ip);
3857 src = ip->regarray.dstsrc_val;
3858 regarray[idx] += src;
3862 __instr_regadd_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3864 uint64_t *regarray, idx, src;
3866 TRACE(
"[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3868 regarray = instr_regarray_regarray(p, ip);
3869 idx = instr_regarray_idx_imm(p, ip);
3870 src = instr_regarray_src_nbo(t, ip);
3871 regarray[idx] += src;
3875 __instr_regadd_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3877 uint64_t *regarray, idx, src;
3879 TRACE(
"[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3881 regarray = instr_regarray_regarray(p, ip);
3882 idx = instr_regarray_idx_imm(p, ip);
3883 src = instr_regarray_src_hbo(t, ip);
3884 regarray[idx] += src;
3888 __instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
3889 struct thread *t __rte_unused,
3890 const struct instruction *ip)
3892 uint64_t *regarray, idx, src;
3894 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3896 regarray = instr_regarray_regarray(p, ip);
3897 idx = instr_regarray_idx_imm(p, ip);
3898 src = ip->regarray.dstsrc_val;
3899 regarray[idx] += src;
3905 static inline struct meter *
3906 instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3908 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3910 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3911 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3912 uint64_t idx64 = *idx64_ptr;
3913 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3914 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3916 return &r->metarray[idx];
3919 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3921 static inline struct meter *
3922 instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3924 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3926 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3927 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3928 uint64_t idx64 = *idx64_ptr;
3929 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3931 return &r->metarray[idx];
3936 #define instr_meter_idx_nbo instr_meter_idx_hbo 3940 static inline struct meter *
3941 instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3943 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3945 uint64_t idx = ip->meter.idx_val & r->size_mask;
3947 return &r->metarray[idx];
3950 static inline uint32_t
3951 instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
3953 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3954 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3955 uint64_t src64 = *src64_ptr;
3956 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3957 uint64_t src = src64 & src64_mask;
3959 return (uint32_t)src;
3962 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3964 static inline uint32_t
3965 instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
3967 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3968 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3969 uint64_t src64 = *src64_ptr;
3970 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3972 return (uint32_t)src;
3977 #define instr_meter_length_nbo instr_meter_length_hbo 3982 instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
3984 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3985 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3986 uint64_t src64 = *src64_ptr;
3987 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3988 uint64_t src = src64 & src64_mask;
3994 instr_meter_color_out_hbo_set(
struct thread *t,
3995 const struct instruction *ip,
3998 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3999 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
4000 uint64_t dst64 = *dst64_ptr;
4001 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
4003 uint64_t src = (uint64_t)color_out;
4005 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
4009 __instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
4011 const struct instruction *ip)
4015 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
4017 m = instr_meter_idx_nbo(p, t, ip);
4022 __instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
4024 const struct instruction *ip)
4028 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
4030 m = instr_meter_idx_hbo(p, t, ip);
4035 __instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
4036 struct thread *t __rte_unused,
4037 const struct instruction *ip)
4041 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
4043 m = instr_meter_idx_imm(p, ip);
4048 __instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4051 uint64_t time, n_pkts, n_bytes;
4055 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
4057 m = instr_meter_idx_nbo(p, t, ip);
4060 length = instr_meter_length_nbo(t, ip);
4061 color_in = instr_meter_color_in_hbo(t, ip);
4064 &m->profile->profile,
4069 color_out &= m->color_mask;
4071 n_pkts = m->n_pkts[color_out];
4072 n_bytes = m->n_bytes[color_out];
4074 instr_meter_color_out_hbo_set(t, ip, color_out);
4076 m->n_pkts[color_out] = n_pkts + 1;
4077 m->n_bytes[color_out] = n_bytes + length;
4081 __instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4084 uint64_t time, n_pkts, n_bytes;
4088 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
4090 m = instr_meter_idx_nbo(p, t, ip);
4093 length = instr_meter_length_nbo(t, ip);
4094 color_in = (
enum rte_color)ip->meter.color_in_val;
4097 &m->profile->profile,
4102 color_out &= m->color_mask;
4104 n_pkts = m->n_pkts[color_out];
4105 n_bytes = m->n_bytes[color_out];
4107 instr_meter_color_out_hbo_set(t, ip, color_out);
4109 m->n_pkts[color_out] = n_pkts + 1;
4110 m->n_bytes[color_out] = n_bytes + length;
4114 __instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4117 uint64_t time, n_pkts, n_bytes;
4121 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
4123 m = instr_meter_idx_nbo(p, t, ip);
4126 length = instr_meter_length_hbo(t, ip);
4127 color_in = instr_meter_color_in_hbo(t, ip);
4130 &m->profile->profile,
4135 color_out &= m->color_mask;
4137 n_pkts = m->n_pkts[color_out];
4138 n_bytes = m->n_bytes[color_out];
4140 instr_meter_color_out_hbo_set(t, ip, color_out);
4142 m->n_pkts[color_out] = n_pkts + 1;
4143 m->n_bytes[color_out] = n_bytes + length;
4147 __instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4150 uint64_t time, n_pkts, n_bytes;
4154 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
4156 m = instr_meter_idx_nbo(p, t, ip);
4159 length = instr_meter_length_hbo(t, ip);
4160 color_in = (
enum rte_color)ip->meter.color_in_val;
4163 &m->profile->profile,
4168 color_out &= m->color_mask;
4170 n_pkts = m->n_pkts[color_out];
4171 n_bytes = m->n_bytes[color_out];
4173 instr_meter_color_out_hbo_set(t, ip, color_out);
4175 m->n_pkts[color_out] = n_pkts + 1;
4176 m->n_bytes[color_out] = n_bytes + length;
4180 __instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4183 uint64_t time, n_pkts, n_bytes;
4187 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
4189 m = instr_meter_idx_hbo(p, t, ip);
4192 length = instr_meter_length_nbo(t, ip);
4193 color_in = instr_meter_color_in_hbo(t, ip);
4196 &m->profile->profile,
4201 color_out &= m->color_mask;
4203 n_pkts = m->n_pkts[color_out];
4204 n_bytes = m->n_bytes[color_out];
4206 instr_meter_color_out_hbo_set(t, ip, color_out);
4208 m->n_pkts[color_out] = n_pkts + 1;
4209 m->n_bytes[color_out] = n_bytes + length;
4213 __instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4216 uint64_t time, n_pkts, n_bytes;
4220 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
4222 m = instr_meter_idx_hbo(p, t, ip);
4225 length = instr_meter_length_nbo(t, ip);
4226 color_in = (
enum rte_color)ip->meter.color_in_val;
4229 &m->profile->profile,
4234 color_out &= m->color_mask;
4236 n_pkts = m->n_pkts[color_out];
4237 n_bytes = m->n_bytes[color_out];
4239 instr_meter_color_out_hbo_set(t, ip, color_out);
4241 m->n_pkts[color_out] = n_pkts + 1;
4242 m->n_bytes[color_out] = n_bytes + length;
4246 __instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4249 uint64_t time, n_pkts, n_bytes;
4253 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
4255 m = instr_meter_idx_hbo(p, t, ip);
4258 length = instr_meter_length_hbo(t, ip);
4259 color_in = instr_meter_color_in_hbo(t, ip);
4262 &m->profile->profile,
4267 color_out &= m->color_mask;
4269 n_pkts = m->n_pkts[color_out];
4270 n_bytes = m->n_bytes[color_out];
4272 instr_meter_color_out_hbo_set(t, ip, color_out);
4274 m->n_pkts[color_out] = n_pkts + 1;
4275 m->n_bytes[color_out] = n_bytes + length;
4279 __instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4282 uint64_t time, n_pkts, n_bytes;
4286 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
4288 m = instr_meter_idx_hbo(p, t, ip);
4291 length = instr_meter_length_hbo(t, ip);
4292 color_in = (
enum rte_color)ip->meter.color_in_val;
4295 &m->profile->profile,
4300 color_out &= m->color_mask;
4302 n_pkts = m->n_pkts[color_out];
4303 n_bytes = m->n_bytes[color_out];
4305 instr_meter_color_out_hbo_set(t, ip, color_out);
4307 m->n_pkts[color_out] = n_pkts + 1;
4308 m->n_bytes[color_out] = n_bytes + length;
4312 __instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4315 uint64_t time, n_pkts, n_bytes;
4319 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
4321 m = instr_meter_idx_imm(p, ip);
4324 length = instr_meter_length_nbo(t, ip);
4325 color_in = instr_meter_color_in_hbo(t, ip);
4328 &m->profile->profile,
4333 color_out &= m->color_mask;
4335 n_pkts = m->n_pkts[color_out];
4336 n_bytes = m->n_bytes[color_out];
4338 instr_meter_color_out_hbo_set(t, ip, color_out);
4340 m->n_pkts[color_out] = n_pkts + 1;
4341 m->n_bytes[color_out] = n_bytes + length;
4345 __instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4348 uint64_t time, n_pkts, n_bytes;
4352 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
4354 m = instr_meter_idx_imm(p, ip);
4357 length = instr_meter_length_nbo(t, ip);
4358 color_in = (
enum rte_color)ip->meter.color_in_val;
4361 &m->profile->profile,
4366 color_out &= m->color_mask;
4368 n_pkts = m->n_pkts[color_out];
4369 n_bytes = m->n_bytes[color_out];
4371 instr_meter_color_out_hbo_set(t, ip, color_out);
4373 m->n_pkts[color_out] = n_pkts + 1;
4374 m->n_bytes[color_out] = n_bytes + length;
4378 __instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4381 uint64_t time, n_pkts, n_bytes;
4385 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
4387 m = instr_meter_idx_imm(p, ip);
4390 length = instr_meter_length_hbo(t, ip);
4391 color_in = instr_meter_color_in_hbo(t, ip);
4394 &m->profile->profile,
4399 color_out &= m->color_mask;
4401 n_pkts = m->n_pkts[color_out];
4402 n_bytes = m->n_bytes[color_out];
4404 instr_meter_color_out_hbo_set(t, ip, color_out);
4406 m->n_pkts[color_out] = n_pkts + 1;
4407 m->n_bytes[color_out] = n_bytes + length;
4411 __instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4414 uint64_t time, n_pkts, n_bytes;
4418 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
4420 m = instr_meter_idx_imm(p, ip);
4423 length = instr_meter_length_hbo(t, ip);
4424 color_in = (
enum rte_color)ip->meter.color_in_val;
4427 &m->profile->profile,
4432 color_out &= m->color_mask;
4434 n_pkts = m->n_pkts[color_out];
4435 n_bytes = m->n_bytes[color_out];
4437 instr_meter_color_out_hbo_set(t, ip, color_out);
4439 m->n_pkts[color_out] = n_pkts + 1;
4440 m->n_bytes[color_out] = n_bytes + length;
uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, size_t *entry_id, int *hit)
int(* rte_swx_extern_func_t)(void *mailbox)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
__rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
static uint32_t rte_bsf32(uint32_t v)
void(* rte_swx_port_out_flush_t)(void *port)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
static uint64_t rte_get_tsc_cycles(void)
void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
__rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
#define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX
#define RTE_SWX_NAME_SIZE
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
static void rte_prefetch0(const volatile void *p)
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)