4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 28 #define TRACE(...) printf(__VA_ARGS__) 36 #define ntoh64(x) rte_be_to_cpu_64(x) 37 #define hton64(x) rte_cpu_to_be_64(x) 50 TAILQ_ENTRY(struct_type) node;
59 TAILQ_HEAD(struct_type_tailq, struct_type);
65 TAILQ_ENTRY(port_in_type) node;
70 TAILQ_HEAD(port_in_type_tailq, port_in_type);
73 TAILQ_ENTRY(port_in) node;
74 struct port_in_type *type;
79 TAILQ_HEAD(port_in_tailq, port_in);
81 struct port_in_runtime {
89 struct port_out_type {
90 TAILQ_ENTRY(port_out_type) node;
95 TAILQ_HEAD(port_out_type_tailq, port_out_type);
98 TAILQ_ENTRY(port_out) node;
99 struct port_out_type *type;
104 TAILQ_HEAD(port_out_tailq, port_out);
106 struct port_out_runtime {
117 struct mirroring_session {
120 uint32_t truncation_length;
126 struct extern_type_member_func {
127 TAILQ_ENTRY(extern_type_member_func) node;
133 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
136 TAILQ_ENTRY(extern_type) node;
138 struct struct_type *mailbox_struct_type;
141 struct extern_type_member_func_tailq funcs;
145 TAILQ_HEAD(extern_type_tailq, extern_type);
148 TAILQ_ENTRY(extern_obj) node;
150 struct extern_type *type;
156 TAILQ_HEAD(extern_obj_tailq, extern_obj);
158 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 159 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8 162 struct extern_obj_runtime {
172 TAILQ_ENTRY(extern_func) node;
174 struct struct_type *mailbox_struct_type;
180 TAILQ_HEAD(extern_func_tailq, extern_func);
182 struct extern_func_runtime {
191 TAILQ_ENTRY(hash_func) node;
197 TAILQ_HEAD(hash_func_tailq, hash_func);
199 struct hash_func_runtime {
207 TAILQ_ENTRY(rss) node;
212 TAILQ_HEAD(rss_tailq, rss);
223 TAILQ_ENTRY(header) node;
225 struct struct_type *st;
230 TAILQ_HEAD(header_tailq, header);
232 struct header_runtime {
237 struct header_out_runtime {
277 enum instruction_type {
335 INSTR_HDR_INVALIDATE,
402 INSTR_ALU_CKADD_FIELD,
403 INSTR_ALU_CKADD_STRUCT20,
404 INSTR_ALU_CKADD_STRUCT,
410 INSTR_ALU_CKSUB_FIELD,
468 INSTR_REGPREFETCH_RH,
469 INSTR_REGPREFETCH_RM,
470 INSTR_REGPREFETCH_RI,
548 INSTR_LEARNER_REARM_NEW,
551 INSTR_LEARNER_FORGET,
610 INSTR_JMP_ACTION_HIT,
615 INSTR_JMP_ACTION_MISS,
668 struct instr_operand {
689 uint8_t header_id[8];
690 uint8_t struct_id[8];
695 struct instr_hdr_validity {
706 uint8_t mf_first_arg_offset;
707 uint8_t mf_timeout_id_offset;
708 uint8_t mf_timeout_id_n_bits;
711 struct instr_extern_obj {
716 struct instr_extern_func {
720 struct instr_hash_func {
721 uint8_t hash_func_id;
750 struct instr_dst_src {
751 struct instr_operand dst;
753 struct instr_operand src;
758 struct instr_regarray {
763 struct instr_operand idx;
768 struct instr_operand dstsrc;
778 struct instr_operand idx;
782 struct instr_operand length;
785 struct instr_operand color_in;
786 uint32_t color_in_val;
789 struct instr_operand color_out;
794 uint8_t header_id[8];
795 uint8_t struct_id[8];
806 struct instruction *ip;
809 struct instr_operand a;
815 struct instr_operand b;
821 enum instruction_type type;
824 struct instr_dst_src mirror;
825 struct instr_hdr_validity valid;
826 struct instr_dst_src mov;
827 struct instr_regarray regarray;
828 struct instr_meter meter;
829 struct instr_dma dma;
830 struct instr_dst_src alu;
831 struct instr_table table;
832 struct instr_learn learn;
833 struct instr_extern_obj ext_obj;
834 struct instr_extern_func ext_func;
835 struct instr_hash_func hash_func;
836 struct instr_rss rss;
837 struct instr_jmp jmp;
841 struct instruction_data {
848 typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
854 (*action_func_t)(
struct rte_swx_pipeline *p);
857 TAILQ_ENTRY(action) node;
859 struct struct_type *st;
860 int *args_endianness;
861 struct instruction *instructions;
862 struct instruction_data *instruction_data;
863 uint32_t n_instructions;
867 TAILQ_HEAD(action_tailq, action);
873 TAILQ_ENTRY(table_type) node;
879 TAILQ_HEAD(table_type_tailq, table_type);
887 TAILQ_ENTRY(table) node;
890 struct table_type *type;
893 struct match_field *fields;
895 struct header *header;
898 struct action **actions;
899 struct action *default_action;
900 uint8_t *default_action_data;
902 int default_action_is_const;
903 uint32_t action_data_size_max;
904 int *action_is_for_table_entries;
905 int *action_is_for_default_entry;
907 struct hash_func *hf;
912 TAILQ_HEAD(table_tailq, table);
914 struct table_runtime {
920 struct table_statistics {
921 uint64_t n_pkts_hit[2];
922 uint64_t *n_pkts_action;
929 TAILQ_ENTRY(selector) node;
932 struct field *group_id_field;
933 struct field **selector_fields;
934 uint32_t n_selector_fields;
935 struct header *selector_header;
936 struct field *member_id_field;
938 uint32_t n_groups_max;
939 uint32_t n_members_per_group_max;
944 TAILQ_HEAD(selector_tailq, selector);
946 struct selector_runtime {
948 uint8_t **group_id_buffer;
949 uint8_t **selector_buffer;
950 uint8_t **member_id_buffer;
953 struct selector_statistics {
961 TAILQ_ENTRY(learner) node;
965 struct field **fields;
967 struct header *header;
970 struct action **actions;
971 struct action *default_action;
972 uint8_t *default_action_data;
974 int default_action_is_const;
975 uint32_t action_data_size_max;
976 int *action_is_for_table_entries;
977 int *action_is_for_default_entry;
979 struct hash_func *hf;
986 TAILQ_HEAD(learner_tailq, learner);
988 struct learner_runtime {
993 struct learner_statistics {
994 uint64_t n_pkts_hit[2];
995 uint64_t n_pkts_learn[2];
996 uint64_t n_pkts_rearm;
997 uint64_t n_pkts_forget;
998 uint64_t *n_pkts_action;
1005 TAILQ_ENTRY(regarray) node;
1012 TAILQ_HEAD(regarray_tailq, regarray);
1014 struct regarray_runtime {
1022 struct meter_profile {
1023 TAILQ_ENTRY(meter_profile) node;
1026 struct rte_meter_trtcm_profile profile;
1030 TAILQ_HEAD(meter_profile_tailq, meter_profile);
1033 TAILQ_ENTRY(metarray) node;
1039 TAILQ_HEAD(metarray_tailq, metarray);
1043 struct meter_profile *profile;
1051 struct metarray_runtime {
1052 struct meter *metarray;
1063 uint32_t *mirroring_slots;
1064 uint64_t mirroring_slots_mask;
1066 uint32_t recirc_pass_id;
1072 struct header_runtime *headers;
1073 struct header_out_runtime *headers_out;
1074 uint8_t *header_storage;
1075 uint8_t *header_out_storage;
1076 uint64_t valid_headers;
1077 uint32_t n_headers_out;
1083 struct table_runtime *tables;
1084 struct selector_runtime *selectors;
1085 struct learner_runtime *learners;
1090 uint32_t learner_id;
1094 struct extern_obj_runtime *extern_objs;
1095 struct extern_func_runtime *extern_funcs;
1098 struct instruction *ip;
1099 struct instruction *ret;
1102 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos))) 1103 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos))) 1104 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos))) 1106 #define HEADER_VALID(thread, header_id) \ 1107 MASK64_BIT_GET((thread)->valid_headers, header_id) 1109 static inline uint64_t
1110 instr_operand_hbo(
struct thread *t,
const struct instr_operand *x)
1112 uint8_t *x_struct = t->structs[x->struct_id];
1113 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1114 uint64_t x64 = *x64_ptr;
1115 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
1117 return x64 & x64_mask;
1120 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1122 static inline uint64_t
1123 instr_operand_nbo(
struct thread *t,
const struct instr_operand *x)
1125 uint8_t *x_struct = t->structs[x->struct_id];
1126 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1127 uint64_t x64 = *x64_ptr;
1129 return ntoh64(x64) >> (64 - x->n_bits);
1134 #define instr_operand_nbo instr_operand_hbo 1138 #define ALU(thread, ip, operator) \ 1140 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1141 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1142 uint64_t dst64 = *dst64_ptr; \ 1143 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1144 uint64_t dst = dst64 & dst64_mask; \ 1146 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1147 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1148 uint64_t src64 = *src64_ptr; \ 1149 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1150 uint64_t src = src64 & src64_mask; \ 1152 uint64_t result = dst operator src; \ 1154 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1157 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1159 #define ALU_MH(thread, ip, operator) \ 1161 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1162 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1163 uint64_t dst64 = *dst64_ptr; \ 1164 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1165 uint64_t dst = dst64 & dst64_mask; \ 1167 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1168 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1169 uint64_t src64 = *src64_ptr; \ 1170 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1172 uint64_t result = dst operator src; \ 1174 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1177 #define ALU_HM(thread, ip, operator) \ 1179 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1180 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1181 uint64_t dst64 = *dst64_ptr; \ 1182 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1183 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1185 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1186 
uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1187 uint64_t src64 = *src64_ptr; \ 1188 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1189 uint64_t src = src64 & src64_mask; \ 1191 uint64_t result = dst operator src; \ 1192 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1194 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1197 #define ALU_HM_FAST(thread, ip, operator) \ 1199 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1200 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1201 uint64_t dst64 = *dst64_ptr; \ 1202 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1203 uint64_t dst = dst64 & dst64_mask; \ 1205 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1206 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1207 uint64_t src64 = *src64_ptr; \ 1208 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1209 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \ 1211 uint64_t result = dst operator src; \ 1213 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1216 #define ALU_HH(thread, ip, operator) \ 1218 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1219 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1220 uint64_t dst64 = *dst64_ptr; \ 1221 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1222 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1224 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1225 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1226 uint64_t src64 = *src64_ptr; \ 1227 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1229 uint64_t result = dst operator src; \ 1230 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1232 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1235 #define ALU_HH_FAST(thread, ip, operator) \ 1237 
uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1238 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1239 uint64_t dst64 = *dst64_ptr; \ 1240 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1241 uint64_t dst = dst64 & dst64_mask; \ 1243 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1244 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1245 uint64_t src64 = *src64_ptr; \ 1246 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \ 1248 uint64_t result = dst operator src; \ 1250 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1257 #define ALU_HM_FAST ALU 1259 #define ALU_HH_FAST ALU 1263 #define ALU_I(thread, ip, operator) \ 1265 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1266 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1267 uint64_t dst64 = *dst64_ptr; \ 1268 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1269 uint64_t dst = dst64 & dst64_mask; \ 1271 uint64_t src = (ip)->alu.src_val; \ 1273 uint64_t result = dst operator src; \ 1275 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1278 #define ALU_MI ALU_I 1280 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1282 #define ALU_HI(thread, ip, operator) \ 1284 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1285 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1286 uint64_t dst64 = *dst64_ptr; \ 1287 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1288 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1290 uint64_t src = (ip)->alu.src_val; \ 1292 uint64_t result = dst operator src; \ 1293 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1295 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1300 #define ALU_HI ALU_I 1304 #define MOV(thread, ip) \ 1306 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1307 
uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1308 uint64_t dst64 = *dst64_ptr; \ 1309 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1311 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1312 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1313 uint64_t src64 = *src64_ptr; \ 1314 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1315 uint64_t src = src64 & src64_mask; \ 1317 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1320 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1322 #define MOV_MH(thread, ip) \ 1324 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1325 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1326 uint64_t dst64 = *dst64_ptr; \ 1327 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1329 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1330 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1331 uint64_t src64 = *src64_ptr; \ 1332 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \ 1334 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1337 #define MOV_HM(thread, ip) \ 1339 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1340 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1341 uint64_t dst64 = *dst64_ptr; \ 1342 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1344 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1345 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1346 uint64_t src64 = *src64_ptr; \ 1347 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1348 uint64_t src = src64 & src64_mask; \ 1350 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \ 1351 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1354 #define MOV_HH(thread, ip) \ 1356 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 
1357 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1358 uint64_t dst64 = *dst64_ptr; \ 1359 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1361 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1362 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1363 uint64_t src64 = *src64_ptr; \ 1365 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \ 1366 src = src >> (64 - (ip)->mov.dst.n_bits); \ 1367 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1378 #define MOV_I(thread, ip) \ 1380 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1381 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1382 uint64_t dst64 = *dst64_ptr; \ 1383 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1385 uint64_t src = (ip)->mov.src_val; \ 1387 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1390 #define JMP_CMP(thread, ip, operator) \ 1392 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1393 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1394 uint64_t a64 = *a64_ptr; \ 1395 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1396 uint64_t a = a64 & a64_mask; \ 1398 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1399 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1400 uint64_t b64 = *b64_ptr; \ 1401 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1402 uint64_t b = b64 & b64_mask; \ 1404 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1407 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1409 #define JMP_CMP_MH(thread, ip, operator) \ 1411 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1412 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1413 uint64_t a64 = *a64_ptr; \ 1414 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1415 uint64_t a = a64 & a64_mask; \ 1417 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1418 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1419 uint64_t b64 = *b64_ptr; \ 1420 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1422 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1425 #define JMP_CMP_HM(thread, ip, operator) \ 1427 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1428 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1429 uint64_t a64 = *a64_ptr; \ 1430 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1432 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1433 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1434 uint64_t b64 = *b64_ptr; \ 1435 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1436 uint64_t b = b64 & b64_mask; \ 1438 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1441 #define JMP_CMP_HH(thread, ip, operator) \ 1443 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1444 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1445 uint64_t a64 = *a64_ptr; \ 1446 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1448 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1449 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1450 uint64_t b64 = *b64_ptr; \ 1451 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1453 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1456 #define JMP_CMP_HH_FAST(thread, ip, operator) \ 1458 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1459 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1460 uint64_t a64 = *a64_ptr; \ 1461 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \ 1463 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1464 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1465 uint64_t b64 = *b64_ptr; \ 1466 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \ 1468 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1473 #define JMP_CMP_MH JMP_CMP 1474 #define JMP_CMP_HM JMP_CMP 1475 #define JMP_CMP_HH JMP_CMP 1476 #define JMP_CMP_HH_FAST JMP_CMP 1480 #define JMP_CMP_I(thread, ip, operator) \ 1482 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1483 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1484 uint64_t a64 = *a64_ptr; \ 1485 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1486 uint64_t a = a64 & a64_mask; \ 1488 uint64_t b = (ip)->jmp.b_val; \ 1490 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1493 #define JMP_CMP_MI JMP_CMP_I 1495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1497 #define JMP_CMP_HI(thread, ip, operator) \ 1499 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1500 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1501 uint64_t a64 = *a64_ptr; \ 1502 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1504 uint64_t b = (ip)->jmp.b_val; \ 1506 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1511 #define JMP_CMP_HI JMP_CMP_I 1515 #define METADATA_READ(thread, offset, n_bits) \ 1517 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1518 uint64_t m64 = *m64_ptr; \ 1519 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1523 #define METADATA_WRITE(thread, offset, n_bits, value) \ 1525 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1526 uint64_t m64 = *m64_ptr; \ 1527 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1529 uint64_t m_new = value; \ 1531 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \ 1534 #ifndef RTE_SWX_PIPELINE_THREADS_MAX 1535 #define RTE_SWX_PIPELINE_THREADS_MAX 16 1538 #ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1539 #define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1024 1542 struct rte_swx_pipeline {
1545 struct struct_type_tailq struct_types;
1546 struct port_in_type_tailq port_in_types;
1547 struct port_in_tailq ports_in;
1548 struct port_out_type_tailq port_out_types;
1549 struct port_out_tailq ports_out;
1550 struct extern_type_tailq extern_types;
1551 struct extern_obj_tailq extern_objs;
1552 struct extern_func_tailq extern_funcs;
1553 struct hash_func_tailq hash_funcs;
1554 struct rss_tailq rss;
1555 struct header_tailq headers;
1556 struct struct_type *metadata_st;
1557 uint32_t metadata_struct_id;
1558 struct action_tailq actions;
1559 struct table_type_tailq table_types;
1560 struct table_tailq tables;
1561 struct selector_tailq selectors;
1562 struct learner_tailq learners;
1563 struct regarray_tailq regarrays;
1564 struct meter_profile_tailq meter_profiles;
1565 struct metarray_tailq metarrays;
1567 struct port_in_runtime *in;
1568 struct port_out_runtime *out;
1569 struct mirroring_session *mirroring_sessions;
1570 struct instruction **action_instructions;
1571 action_func_t *action_funcs;
1573 struct table_statistics *table_stats;
1574 struct selector_statistics *selector_stats;
1575 struct learner_statistics *learner_stats;
1576 struct hash_func_runtime *hash_func_runtime;
1577 struct rss_runtime **rss_runtime;
1578 struct regarray_runtime *regarray_runtime;
1579 struct metarray_runtime *metarray_runtime;
1580 struct instruction *instructions;
1581 struct instruction_data *instruction_data;
1582 instr_exec_t *instruction_table;
1583 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1587 uint32_t n_ports_in;
1588 uint32_t n_ports_out;
1589 uint32_t n_mirroring_slots;
1590 uint32_t n_mirroring_sessions;
1591 uint32_t n_extern_objs;
1592 uint32_t n_extern_funcs;
1593 uint32_t n_hash_funcs;
1597 uint32_t n_selectors;
1598 uint32_t n_learners;
1599 uint32_t n_regarrays;
1600 uint32_t n_metarrays;
1604 uint32_t n_instructions;
1613 pipeline_port_inc(
struct rte_swx_pipeline *p)
1615 uint32_t port_id = p->port_id;
1618 if (port_id == p->n_ports_in)
1621 p->port_id = port_id;
1625 thread_ip_reset(
struct rte_swx_pipeline *p,
struct thread *t)
1627 t->ip = p->instructions;
1631 thread_ip_set(
struct thread *t,
struct instruction *ip)
1637 thread_ip_action_call(
struct rte_swx_pipeline *p,
1642 t->ip = p->action_instructions[action_id];
1646 thread_ip_inc(
struct rte_swx_pipeline *p);
1649 thread_ip_inc(
struct rte_swx_pipeline *p)
1651 struct thread *t = &p->threads[p->thread_id];
1657 thread_ip_inc_cond(
struct thread *t,
int cond)
1663 thread_yield(
struct rte_swx_pipeline *p)
1665 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1669 thread_yield_cond(
struct rte_swx_pipeline *p,
int cond)
1671 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1678 __instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1680 struct port_in_runtime *port = &p->in[p->port_id];
1685 if (t->recirculate) {
1686 TRACE(
"[Thread %2u] rx - recirculate (pass %u)\n",
1688 t->recirc_pass_id + 1);
1692 t->mirroring_slots_mask = 0;
1694 t->recirc_pass_id++;
1697 t->valid_headers = 0;
1698 t->n_headers_out = 0;
1701 t->table_state = p->table_state;
1707 pkt_received = port->pkt_rx(port->obj, pkt);
1711 TRACE(
"[Thread %2u] rx %s from port %u\n",
1713 pkt_received ?
"1 pkt" :
"0 pkts",
1716 t->mirroring_slots_mask = 0;
1717 t->recirc_pass_id = 0;
1720 t->valid_headers = 0;
1721 t->n_headers_out = 0;
1724 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1727 t->table_state = p->table_state;
1730 pipeline_port_inc(p);
1732 return pkt_received;
1736 instr_rx_exec(
struct rte_swx_pipeline *p)
1738 struct thread *t = &p->threads[p->thread_id];
1739 struct instruction *ip = t->ip;
1743 pkt_received = __instr_rx_exec(p, t, ip);
1746 thread_ip_inc_cond(t, pkt_received);
1754 emit_handler(
struct thread *t)
1756 struct header_out_runtime *h0 = &t->headers_out[0];
1757 struct header_out_runtime *h1 = &t->headers_out[1];
1758 uint32_t offset = 0, i;
1761 if ((t->n_headers_out == 1) &&
1762 (h0->ptr + h0->n_bytes == t->ptr)) {
1763 TRACE(
"Emit handler: no header change or header decap.\n");
1765 t->pkt.offset -= h0->n_bytes;
1766 t->pkt.length += h0->n_bytes;
1772 if ((t->n_headers_out == 2) &&
1773 (h1->ptr + h1->n_bytes == t->ptr) &&
1774 (h0->ptr == h0->ptr0)) {
1777 TRACE(
"Emit handler: header encapsulation.\n");
1779 offset = h0->n_bytes + h1->n_bytes;
1780 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1781 t->pkt.offset -= offset;
1782 t->pkt.length += offset;
1788 TRACE(
"Emit handler: complex case.\n");
1790 for (i = 0; i < t->n_headers_out; i++) {
1791 struct header_out_runtime *h = &t->headers_out[i];
1793 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1794 offset += h->n_bytes;
1798 memcpy(t->ptr - offset, t->header_out_storage, offset);
1799 t->pkt.offset -= offset;
1800 t->pkt.length += offset;
1805 mirroring_handler(
struct rte_swx_pipeline *p,
struct thread *t,
struct rte_swx_pkt *pkt)
1807 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1810 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1811 if (slot_mask & slots_mask) {
1812 struct port_out_runtime *port;
1813 struct mirroring_session *session;
1814 uint32_t port_id, session_id;
1816 session_id = t->mirroring_slots[slot_id];
1817 session = &p->mirroring_sessions[session_id];
1819 port_id = session->port_id;
1820 port = &p->out[port_id];
1822 if (session->fast_clone)
1823 port->pkt_fast_clone_tx(port->obj, pkt);
1825 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
1827 slots_mask &= ~slot_mask;
1832 __instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1835 struct port_out_runtime *port;
1839 if (t->recirculate) {
1840 TRACE(
"[Thread %2u]: tx 1 pkt - recirculate\n",
1847 mirroring_handler(p, t, pkt);
1855 port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1856 if (port_id >= p->n_ports_out)
1857 port_id = p->n_ports_out - 1;
1859 port = &p->out[port_id];
1861 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1869 mirroring_handler(p, t, pkt);
1870 port->pkt_tx(port->obj, pkt);
1874 __instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1877 struct port_out_runtime *port;
1881 if (t->recirculate) {
1882 TRACE(
"[Thread %2u]: tx (i) 1 pkt - recirculate\n",
1889 mirroring_handler(p, t, pkt);
1901 port_id = ip->io.io.val;
1902 if (port_id >= p->n_ports_out)
1903 port_id = p->n_ports_out - 1;
1905 port = &p->out[port_id];
1907 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1915 mirroring_handler(p, t, pkt);
1916 port->pkt_tx(port->obj, pkt);
1920 __instr_drop_exec(
struct rte_swx_pipeline *p,
1924 uint64_t port_id = p->n_ports_out - 1;
1925 struct port_out_runtime *port = &p->out[port_id];
1928 TRACE(
"[Thread %2u]: drop 1 pkt\n",
1935 mirroring_handler(p, t, pkt);
1936 port->pkt_tx(port->obj, pkt);
1940 __instr_mirror_exec(
struct rte_swx_pipeline *p,
1942 const struct instruction *ip)
1944 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1945 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
1947 slot_id &= p->n_mirroring_slots - 1;
1948 session_id &= p->n_mirroring_sessions - 1;
1950 TRACE(
"[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1953 (uint32_t)session_id);
1955 t->mirroring_slots[slot_id] = session_id;
1956 t->mirroring_slots_mask |= 1LLU << slot_id;
1960 __instr_recirculate_exec(
struct rte_swx_pipeline *p __rte_unused,
1962 const struct instruction *ip __rte_unused)
1964 TRACE(
"[Thread %2u]: recirculate\n",
1971 __instr_recircid_exec(
struct rte_swx_pipeline *p __rte_unused,
1973 const struct instruction *ip)
1975 TRACE(
"[Thread %2u]: recircid (pass %u)\n",
1980 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
1987 __instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p __rte_unused,
1989 const struct instruction *ip,
1992 uint64_t valid_headers = t->valid_headers;
1993 uint8_t *ptr = t->ptr;
1994 uint32_t offset = t->pkt.offset;
1995 uint32_t
length = t->pkt.length;
1998 for (i = 0; i < n_extract; i++) {
1999 uint32_t header_id = ip->io.hdr.header_id[i];
2000 uint32_t struct_id = ip->io.hdr.struct_id[i];
2001 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2003 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
2009 t->structs[struct_id] = ptr;
2010 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2019 t->valid_headers = valid_headers;
/* extract instruction: pull one header out of the packet. */
static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p,
			 struct thread *t, /* NOTE(review): parameter reconstructed from sibling signatures. */
			 const struct instruction *ip)
{
	__instr_hdr_extract_many_exec(p, t, ip, 1);
}
2036 __instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
2038 const struct instruction *ip)
2040 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2042 __instr_hdr_extract_many_exec(p, t, ip, 2);
2046 __instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
2048 const struct instruction *ip)
2050 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2052 __instr_hdr_extract_many_exec(p, t, ip, 3);
2056 __instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
2058 const struct instruction *ip)
2060 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2062 __instr_hdr_extract_many_exec(p, t, ip, 4);
2066 __instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
2068 const struct instruction *ip)
2070 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2072 __instr_hdr_extract_many_exec(p, t, ip, 5);
2076 __instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
2078 const struct instruction *ip)
2080 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2082 __instr_hdr_extract_many_exec(p, t, ip, 6);
2086 __instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
2088 const struct instruction *ip)
2090 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2092 __instr_hdr_extract_many_exec(p, t, ip, 7);
2096 __instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
2098 const struct instruction *ip)
2100 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2102 __instr_hdr_extract_many_exec(p, t, ip, 8);
/*
 * Extract a variable-size header: the size of its trailing portion is read
 * at run-time from a metadata field (METADATA_READ below), then added to the
 * header's fixed size before the packet cursor is advanced.
 * NOTE(review): lossy extract -- several body lines (TRACE arguments, blank
 * separators, braces) are elided; verify against the complete file.
 */
2106 __instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p __rte_unused,
2108 const struct instruction *ip)
2110 uint64_t valid_headers = t->valid_headers;
2111 uint8_t *ptr = t->ptr;
2112 uint32_t offset = t->pkt.offset;
2113 uint32_t length = t->pkt.length;
/* Run-time size of the last (variable) header portion, taken from metadata. */
2115 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2116 uint32_t header_id = ip->io.hdr.header_id[0];
2117 uint32_t struct_id = ip->io.hdr.struct_id[0];
2118 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
2120 struct header_runtime *h = &t->headers[header_id];
2122 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
/* Total header size = fixed part + metadata-supplied variable part. */
2128 n_bytes += n_bytes_last;
/* Map the header struct onto the packet buffer and mark it valid. */
2131 t->structs[struct_id] = ptr;
2132 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2133 h->n_bytes = n_bytes;
/* Advance the packet cursor past the extracted header. */
2136 t->pkt.offset = offset + n_bytes;
2137 t->pkt.length = length - n_bytes;
2138 t->ptr = ptr + n_bytes;
/*
 * Look ahead at a header without consuming it: map the header struct onto
 * the current packet pointer and mark the header valid, but -- unlike
 * extract -- do NOT advance the packet offset/length/cursor.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * TRACE arguments and braces are on elided lines.
 */
2142 __instr_hdr_lookahead_exec(
struct rte_swx_pipeline *p __rte_unused,
2144 const struct instruction *ip)
2146 uint64_t valid_headers = t->valid_headers;
2147 uint8_t *ptr = t->ptr;
2149 uint32_t header_id = ip->io.hdr.header_id[0];
2150 uint32_t struct_id = ip->io.hdr.struct_id[0];
2152 TRACE(
"[Thread %2u]: lookahead header %u\n",
/* Map header onto the packet in place; packet cursor is left untouched. */
2157 t->structs[struct_id] = ptr;
2158 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Emit n_emit headers into the thread's output header list (t->headers_out).
 * Adjacent headers that are contiguous in memory (ho_ptr + ho_nbytes ==
 * hi_ptr) are coalesced into a single output entry to reduce the number of
 * memory copies at TX time.  Invalid headers are skipped (only logged).
 * NOTE(review): lossy extract -- return type, "struct thread *t" and the
 * "n_emit" parameter line, branch/brace lines and several assignments are
 * elided; the control flow below is only partially visible.
 */
2165 __instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p __rte_unused,
2167 const struct instruction *ip,
2170 uint64_t valid_headers = t->valid_headers;
2171 uint32_t n_headers_out = t->n_headers_out;
2172 struct header_out_runtime *ho = NULL;
2173 uint8_t *ho_ptr = NULL;
2174 uint32_t ho_nbytes = 0, i;
2176 for (i = 0; i < n_emit; i++) {
2177 uint32_t header_id = ip->io.hdr.header_id[i];
2178 uint32_t struct_id = ip->io.hdr.struct_id[i];
2180 struct header_runtime *hi = &t->headers[header_id];
2181 uint8_t *hi_ptr0 = hi->ptr0;
2182 uint32_t n_bytes = hi->n_bytes;
2184 uint8_t *hi_ptr = t->structs[struct_id];
/* Invalid headers are not emitted. */
2186 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2187 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
2194 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
/* First emitted header: start output entry 0. */
2200 if (!n_headers_out) {
2201 ho = &t->headers_out[0];
2207 ho_nbytes = n_bytes;
/* Otherwise continue from the most recent output entry. */
2213 ho = &t->headers_out[n_headers_out - 1];
2216 ho_nbytes = ho->n_bytes;
/* Coalesce with the previous entry when contiguous in memory. */
2220 if (ho_ptr + ho_nbytes == hi_ptr) {
2221 ho_nbytes += n_bytes;
2223 ho->n_bytes = ho_nbytes;
2230 ho_nbytes = n_bytes;
2237 ho->n_bytes = ho_nbytes;
2238 t->n_headers_out = n_headers_out;
/*
 * Emit one header.  Thin wrapper over __instr_hdr_emit_many_exec() with
 * n_emit = 1.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and braces are on elided lines.
 */
2242 __instr_hdr_emit_exec(
struct rte_swx_pipeline *p,
2244 const struct instruction *ip)
2246 __instr_hdr_emit_many_exec(p, t, ip, 1);
/*
 * Fused emit+tx wrappers: __instr_hdr_emitN_tx_exec() emits N headers and
 * then transmits the packet, replacing N+1 original instructions (hence the
 * "next N+1 instructions are fused" trace).  All delegate to
 * __instr_hdr_emit_many_exec() followed by __instr_tx_exec().
 * NOTE(review): lossy extract -- return types, "struct thread *t" parameters
 * and braces are on elided lines.
 */
2250 __instr_hdr_emit_tx_exec(
struct rte_swx_pipeline *p,
2252 const struct instruction *ip)
2254 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2256 __instr_hdr_emit_many_exec(p, t, ip, 1);
2257 __instr_tx_exec(p, t, ip);
/* Emit 2 headers, then tx. */
2261 __instr_hdr_emit2_tx_exec(
struct rte_swx_pipeline *p,
2263 const struct instruction *ip)
2265 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2267 __instr_hdr_emit_many_exec(p, t, ip, 2);
2268 __instr_tx_exec(p, t, ip);
/* Emit 3 headers, then tx. */
2272 __instr_hdr_emit3_tx_exec(
struct rte_swx_pipeline *p,
2274 const struct instruction *ip)
2276 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2278 __instr_hdr_emit_many_exec(p, t, ip, 3);
2279 __instr_tx_exec(p, t, ip);
/* Emit 4 headers, then tx. */
2283 __instr_hdr_emit4_tx_exec(
struct rte_swx_pipeline *p,
2285 const struct instruction *ip)
2287 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2289 __instr_hdr_emit_many_exec(p, t, ip, 4);
2290 __instr_tx_exec(p, t, ip);
/* Emit 5 headers, then tx. */
2294 __instr_hdr_emit5_tx_exec(
struct rte_swx_pipeline *p,
2296 const struct instruction *ip)
2298 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2300 __instr_hdr_emit_many_exec(p, t, ip, 5);
2301 __instr_tx_exec(p, t, ip);
/* Emit 6 headers, then tx. */
2305 __instr_hdr_emit6_tx_exec(
struct rte_swx_pipeline *p,
2307 const struct instruction *ip)
2309 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2311 __instr_hdr_emit_many_exec(p, t, ip, 6);
2312 __instr_tx_exec(p, t, ip);
/* Emit 7 headers, then tx. */
2316 __instr_hdr_emit7_tx_exec(
struct rte_swx_pipeline *p,
2318 const struct instruction *ip)
2320 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2322 __instr_hdr_emit_many_exec(p, t, ip, 7);
2323 __instr_tx_exec(p, t, ip);
/* Emit 8 headers, then tx. */
2327 __instr_hdr_emit8_tx_exec(
struct rte_swx_pipeline *p,
2329 const struct instruction *ip)
2331 TRACE(
"[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2333 __instr_hdr_emit_many_exec(p, t, ip, 8);
2334 __instr_tx_exec(p, t, ip);
/*
 * Mark a header valid.  If the header is already valid the mapping is left
 * alone (the early-out branch below); otherwise the header struct is pointed
 * at the header's private buffer (h->ptr0) and its valid bit is set.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * the body of the already-valid branch and braces are on elided lines.
 */
2341 __instr_hdr_validate_exec(
struct rte_swx_pipeline *p __rte_unused,
2343 const struct instruction *ip)
2345 uint32_t header_id = ip->valid.header_id;
2346 uint32_t struct_id = ip->valid.struct_id;
2347 uint64_t valid_headers = t->valid_headers;
2348 struct header_runtime *h = &t->headers[header_id];
2350 TRACE(
"[Thread %2u] validate header %u\n", p->thread_id, header_id);
/* Header already valid: nothing to remap (branch body elided). */
2356 if (MASK64_BIT_GET(valid_headers, header_id))
/* Point the header struct at its private buffer and set the valid bit. */
2360 t->structs[struct_id] = h->ptr0;
2361 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Mark a header invalid by clearing its bit in the thread's valid-header
 * mask.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and braces are on elided lines.
 */
2368 __instr_hdr_invalidate_exec(
struct rte_swx_pipeline *p __rte_unused,
2370 const struct instruction *ip)
2372 uint32_t header_id = ip->valid.header_id;
2374 TRACE(
"[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2377 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Learn (add) an entry into the current learner table: the action ID comes
 * from the instruction, the action arguments from metadata at
 * mf_first_arg_offset, and the entry timeout ID from another metadata field.
 * Updates the learner's learn statistics with the (ok/error) status.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * the add-call itself and the "status" declaration are on elided lines.
 */
2384 __instr_learn_exec(
struct rte_swx_pipeline *p,
2386 const struct instruction *ip)
2388 uint64_t action_id = ip->learn.action_id;
2389 uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
/* Timeout ID is read from packet metadata at run time. */
2390 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2391 ip->learn.mf_timeout_id_n_bits);
2392 uint32_t learner_id = t->learner_id;
2394 p->n_selectors + learner_id];
2395 struct learner_runtime *l = &t->learners[learner_id];
2396 struct learner_statistics *stats = &p->learner_stats[learner_id];
/* Action arguments are taken directly from the metadata block. */
2404 &t->metadata[mf_first_arg_offset],
2407 TRACE(
"[Thread %2u] learner %u learn %s\n",
2410 status ?
"ok" :
"error");
/* Per-status learn counter (indexed by ok/error status). */
2412 stats->n_pkts_learn[status] += 1;
/*
 * Re-arm (refresh the timeout of) the current learner table entry, keeping
 * its existing timeout ID.  Bumps the learner's rearm statistics counter.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and the rearm call itself are on elided lines.
 */
2419 __instr_rearm_exec(
struct rte_swx_pipeline *p,
2421 const struct instruction *ip __rte_unused)
2423 uint32_t learner_id = t->learner_id;
2425 p->n_selectors + learner_id];
2426 struct learner_runtime *l = &t->learners[learner_id];
2427 struct learner_statistics *stats = &p->learner_stats[learner_id];
2432 TRACE(
"[Thread %2u] learner %u rearm\n",
2436 stats->n_pkts_rearm += 1;
/*
 * Re-arm the current learner table entry with a NEW timeout ID, read from a
 * metadata field named by the instruction.  Bumps the rearm statistics.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and the rearm call itself are on elided lines.
 */
2440 __instr_rearm_new_exec(
struct rte_swx_pipeline *p,
2442 const struct instruction *ip)
/* New timeout ID comes from packet metadata. */
2444 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2445 ip->learn.mf_timeout_id_n_bits);
2446 uint32_t learner_id = t->learner_id;
2448 p->n_selectors + learner_id];
2449 struct learner_runtime *l = &t->learners[learner_id];
2450 struct learner_statistics *stats = &p->learner_stats[learner_id];
2455 TRACE(
"[Thread %2u] learner %u rearm with timeout ID %u\n",
2460 stats->n_pkts_rearm += 1;
/*
 * Forget (delete) the current learner table entry and bump the learner's
 * forget statistics counter.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and the delete call itself are on elided lines.
 */
2467 __instr_forget_exec(
struct rte_swx_pipeline *p,
2469 const struct instruction *ip __rte_unused)
2471 uint32_t learner_id = t->learner_id;
2473 p->n_selectors + learner_id];
2474 struct learner_runtime *l = &t->learners[learner_id];
2475 struct learner_statistics *stats = &p->learner_stats[learner_id];
2480 TRACE(
"[Thread %2u] learner %u forget\n",
2484 stats->n_pkts_forget += 1;
/*
 * Write the current table entry ID (t->entry_id) into the metadata field
 * designated by the instruction's mov destination.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and braces are on elided lines.
 */
2491 __instr_entryid_exec(
struct rte_swx_pipeline *p __rte_unused,
2493 const struct instruction *ip)
2495 TRACE(
"[Thread %2u]: entryid\n",
2499 METADATA_WRITE(t, ip->mov.dst.offset, ip->mov.dst.n_bits, t->entry_id);
/*
 * Invoke a member function of an extern object.  The function communicates
 * with the pipeline through the object's mailbox; the uint32_t return
 * ("done") reports completion status back to the interpreter.
 * NOTE(review): lossy extract -- "struct thread *t" parameter, the "func"
 * lookup and the return statement are on elided lines.
 */
2505 static inline uint32_t
2506 __instr_extern_obj_exec(
struct rte_swx_pipeline *p __rte_unused,
2508 const struct instruction *ip)
2510 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2511 uint32_t func_id = ip->ext_obj.func_id;
2512 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2516 TRACE(
"[Thread %2u] extern obj %u member func %u\n",
/* Call through the object's mailbox interface. */
2521 done = func(obj->obj, obj->mailbox);
/*
 * Invoke an extern (free-standing) function via its mailbox.  The uint32_t
 * return ("done") reports completion status back to the interpreter.
 * NOTE(review): lossy extract -- "struct thread *t" parameter, the "func"
 * lookup and the return statement are on elided lines.
 */
2526 static inline uint32_t
2527 __instr_extern_func_exec(
struct rte_swx_pipeline *p __rte_unused,
2529 const struct instruction *ip)
2531 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2532 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2536 TRACE(
"[Thread %2u] extern func %u\n",
2540 done = func(ext_func->mailbox);
/*
 * Compute a hash over n_src_bytes of the source struct using the configured
 * hash function (seed 0) and store the result into a metadata field.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * the "result" declaration and braces are on elided lines.
 */
2549 __instr_hash_func_exec(
struct rte_swx_pipeline *p,
2551 const struct instruction *ip)
2553 uint32_t hash_func_id = ip->hash_func.hash_func_id;
2554 uint32_t dst_offset = ip->hash_func.dst.offset;
2555 uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
2556 uint32_t src_struct_id = ip->hash_func.src.struct_id;
2557 uint32_t src_offset = ip->hash_func.src.offset;
2558 uint32_t n_src_bytes = ip->hash_func.src.n_bytes;
2560 struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
2561 uint8_t *src_ptr = t->structs[src_struct_id];
2564 TRACE(
"[Thread %2u] hash %u\n",
/* Seed is fixed to 0; result is truncated to n_dst_bits on write. */
2568 result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
2569 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * Software RSS hash over input_data using rss_key.  Key and data are
 * processed as 32-bit words (sizes are truncated to multiples of 4 by the
 * >> 2 below).  For every set bit of each data word, a 32-bit window of the
 * key aligned to that bit position is XORed into the hash -- this matches
 * the Toeplitz-style construction; confirm against the RSS spec before
 * relying on exact equivalence.
 * NOTE(review): lossy extract -- the "pos" computation, inner-loop braces
 * and the return statement are on elided lines.
 */
2575 static inline uint32_t
2576 rss_func(
void *rss_key, uint32_t rss_key_size,
void *input_data, uint32_t input_data_size)
2578 uint32_t *key = (uint32_t *)rss_key;
2579 uint32_t *data = (uint32_t *)input_data;
/* Word counts: byte sizes divided by 4. */
2580 uint32_t key_size = rss_key_size >> 2;
2581 uint32_t data_size = input_data_size >> 2;
2582 uint32_t hash_val = 0, i;
2584 for (i = 0; i < data_size; i++) {
/* Iterate over the set bits of the current data word (d &= d - 1 clears
 * the lowest set bit each pass). */
2587 for (d = data[i]; d; d &= (d - 1)) {
2588 uint32_t key0, key1, pos;
/* Build the 32-bit key window starting at bit position "pos"
 * (pos computed on an elided line). */
2591 key0 = key[i % key_size] << (31 - pos);
2592 key1 = key[(i + 1) % key_size] >> (pos + 1);
2593 hash_val ^= key0 | key1;
/*
 * Compute the RSS hash of n_src_bytes of the source struct using the RSS
 * object's key, then store the result into a metadata field.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * the "result" declaration and braces are on elided lines.
 */
2601 __instr_rss_exec(
struct rte_swx_pipeline *p,
2603 const struct instruction *ip)
2605 uint32_t rss_obj_id = ip->rss.rss_obj_id;
2606 uint32_t dst_offset = ip->rss.dst.offset;
2607 uint32_t n_dst_bits = ip->rss.dst.n_bits;
2608 uint32_t src_struct_id = ip->rss.src.struct_id;
2609 uint32_t src_offset = ip->rss.src.offset;
2610 uint32_t n_src_bytes = ip->rss.src.n_bytes;
2612 struct rss_runtime *r = p->rss_runtime[rss_obj_id];
2613 uint8_t *src_ptr = t->structs[src_struct_id];
2616 TRACE(
"[Thread %2u] rss %u\n",
/* Result is truncated to n_dst_bits on the metadata write. */
2620 result = rss_func(r->key, r->key_size, &src_ptr[src_offset], n_src_bytes);
2621 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * mov instruction variants.  Suffix convention (visible in the TRACE tags):
 * (mh) = metadata <- header, (hm) = header <- metadata, (hh) = header <-
 * header; no suffix presumably means metadata <- metadata -- TODO confirm;
 * the actual MOV* macro invocations are on elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters, the MOV macro calls and braces are all missing here.
 */
2628 __instr_mov_exec(
struct rte_swx_pipeline *p __rte_unused,
2630 const struct instruction *ip)
2632 TRACE(
"[Thread %2u] mov\n", p->thread_id);
/* metadata <- header variant. */
2638 __instr_mov_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2640 const struct instruction *ip)
2642 TRACE(
"[Thread %2u] mov (mh)\n", p->thread_id);
/* header <- metadata variant. */
2648 __instr_mov_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2650 const struct instruction *ip)
2652 TRACE(
"[Thread %2u] mov (hm)\n", p->thread_id);
/* header <- header variant. */
2658 __instr_mov_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2660 const struct instruction *ip)
2662 TRACE(
"[Thread %2u] mov (hh)\n", p->thread_id);
/*
 * Byte-array (DMA-style) move between two structs.  When the destination is
 * wider than the source, the leading destination bytes are zero-filled; when
 * the source is wider, its leading bytes are skipped so the copy is
 * right-aligned.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * several pointer-adjustment lines and braces are elided.  The TRACE
 * references "n", which is not declared in the visible lines -- presumably
 * declared on an elided line; verify against the full source.
 */
2668 __instr_mov_dma_exec(
struct rte_swx_pipeline *p __rte_unused,
2670 const struct instruction *ip)
2672 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2673 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
/* Field sizes in bytes (n_bits / 8). */
2675 uint32_t n_dst = ip->mov.dst.n_bits >> 3;
2676 uint32_t n_src = ip->mov.src.n_bits >> 3;
2678 TRACE(
"[Thread %2u] mov (dma) %u bytes\n", p->thread_id, n);
/* Destination wider: zero-pad the leading bytes, then copy the source. */
2681 if (n_dst > n_src) {
2682 uint32_t n_dst_zero = n_dst - n_src;
2685 memset(dst, 0, n_dst_zero);
2689 memcpy(dst, src, n_src);
/* Source wider (or equal): skip its leading bytes and copy n_dst. */
2691 uint32_t n_src_skipped = n_src - n_dst;
2694 src += n_src_skipped;
2695 memcpy(dst, src, n_dst);
/*
 * 128-bit move: copy two 64-bit words from source field to destination
 * field.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and braces are on elided lines.  The uint64_t* casts assume suitable
 * alignment of the field offsets -- established elsewhere in the pipeline,
 * not visible here.
 */
2700 __instr_mov_128_exec(
struct rte_swx_pipeline *p __rte_unused,
2702 const struct instruction *ip)
2704 uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2705 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2707 uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2708 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2710 TRACE(
"[Thread %2u] mov (128)\n", p->thread_id);
2712 dst64_ptr[0] = src64_ptr[0];
2713 dst64_ptr[1] = src64_ptr[1];
/*
 * Mixed-width 128/64-bit moves.  128 <- 64 writes the 64-bit source into the
 * low half of the 128-bit destination (dst64[1]); 64 <- 128 reads the low
 * half of the 128-bit source (src64[1]).  Index 1 is the low half because
 * header fields are stored in network byte order -- presumably; the zeroing
 * of dst64[0] in the 128 <- 64 case is on an elided line; verify.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters and braces are missing.
 */
2717 __instr_mov_128_64_exec(
struct rte_swx_pipeline *p __rte_unused,
2719 const struct instruction *ip)
2721 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2722 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2724 uint64_t *dst64 = (uint64_t *)dst;
2725 uint64_t *src64 = (uint64_t *)src;
2727 TRACE(
"[Thread %2u] mov (128 <- 64)\n", p->thread_id);
2730 dst64[1] = src64[0];
/* 64 <- 128: take the low half of the 128-bit source. */
2734 __instr_mov_64_128_exec(
struct rte_swx_pipeline *p __rte_unused,
2736 const struct instruction *ip)
2738 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2739 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2741 uint64_t *dst64 = (uint64_t *)dst;
2742 uint64_t *src64 = (uint64_t *)src;
2744 TRACE(
"[Thread %2u] mov (64 <- 128)\n", p->thread_id);
2746 dst64[0] = src64[1];
/*
 * Mixed-width 128/32-bit moves.  128 <- 32 writes the 32-bit source into the
 * lowest word of the 128-bit destination (dst32[3]); 32 <- 128 reads the
 * lowest word of the 128-bit source (src32[3]).  Index 3 is the low word
 * because header fields are stored in network byte order -- presumably; the
 * zeroing of dst32[0..2] in the 128 <- 32 case is on elided lines; verify.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters and braces are missing.
 */
2750 __instr_mov_128_32_exec(
struct rte_swx_pipeline *p __rte_unused,
2752 const struct instruction *ip)
2754 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2755 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2757 uint32_t *dst32 = (uint32_t *)dst;
2758 uint32_t *src32 = (uint32_t *)src;
2760 TRACE(
"[Thread %2u] mov (128 <- 32)\n", p->thread_id);
2765 dst32[3] = src32[0];
/* 32 <- 128: take the lowest 32-bit word of the 128-bit source. */
2769 __instr_mov_32_128_exec(
struct rte_swx_pipeline *p __rte_unused,
2771 const struct instruction *ip)
2773 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2774 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2776 uint32_t *dst32 = (uint32_t *)dst;
2777 uint32_t *src32 = (uint32_t *)src;
2779 TRACE(
"[Thread %2u] mov (32 <- 128)\n", p->thread_id);
2781 dst32[0] = src32[3];
/*
 * Move an immediate value (ip->mov.src_val) into a metadata field.  The
 * actual write is on an elided line.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter,
 * the MOV/METADATA_WRITE call and braces are missing.
 */
2785 __instr_mov_i_exec(
struct rte_swx_pipeline *p __rte_unused,
2787 const struct instruction *ip)
2789 TRACE(
"[Thread %2u] mov m.f %" PRIx64
"\n", p->thread_id, ip->mov.src_val);
/*
 * movh: copy the upper 64-bit half of a 128-bit operand -- per the visible
 * code it copies one 64-bit word at the operands' offsets (dst64[0] =
 * src64[0]); the "upper half" semantics come from how the offsets are
 * encoded, which is not visible here -- verify against the full source.
 * NOTE(review): lossy extract -- return type, "struct thread *t" parameter
 * and braces are missing.
 */
2798 __instr_movh_exec(
struct rte_swx_pipeline *p __rte_unused,
2800 const struct instruction *ip)
2802 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2803 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2805 uint64_t *dst64 = (uint64_t *)dst;
2806 uint64_t *src64 = (uint64_t *)src;
2808 TRACE(
"[Thread %2u] movh\n", p->thread_id);
2810 dst64[0] = src64[0];
/*
 * DMA n_dma blocks of table action data (t->structs[0]) into headers.  For
 * each block: pick the destination -- the live header struct if the header
 * is already valid, otherwise its private buffer (selection partially
 * elided) -- copy n_bytes of action data, remap the struct and mark the
 * header valid.
 * NOTE(review): lossy extract -- return type, "struct thread *t" and
 * "n_dma" parameter lines, the dst ternary's second arm, loop braces and
 * the "i" declaration are elided.
 */
2817 __instr_dma_ht_many_exec(
struct rte_swx_pipeline *p __rte_unused,
2819 const struct instruction *ip,
/* Struct 0 holds the current table entry's action data. */
2822 uint8_t *action_data = t->structs[0];
2823 uint64_t valid_headers = t->valid_headers;
2826 for (i = 0; i < n_dma; i++) {
2827 uint32_t header_id = ip->dma.dst.header_id[i];
2828 uint32_t struct_id = ip->dma.dst.struct_id[i];
2829 uint32_t offset = ip->dma.src.offset[i];
2830 uint32_t n_bytes = ip->dma.n_bytes[i];
2832 struct header_runtime *h = &t->headers[header_id];
2833 uint8_t *h_ptr0 = h->ptr0;
2834 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; otherwise the private buffer
 * (else-arm of the ternary is on an elided line). */
2836 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2838 void *src = &action_data[offset];
2840 TRACE(
"[Thread %2u] dma h.s t.f\n", p->thread_id);
2843 memcpy(dst, src, n_bytes);
2844 t->structs[struct_id] = dst;
2845 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2848 t->valid_headers = valid_headers;
/*
 * Single-block DMA: delegates to __instr_dma_ht_many_exec() with n_dma = 1.
 * NOTE(review): lossy extract -- return type and braces are on elided lines.
 */
2852 __instr_dma_ht_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2854 __instr_dma_ht_many_exec(p, t, ip, 1);
/*
 * Fused DMA wrappers: __instr_dma_htN_exec() (N = 2..8) performs N
 * action-data-to-header DMA blocks by delegating to
 * __instr_dma_ht_many_exec(), logging that N instructions were fused.
 * NOTE(review): lossy extract -- return types and braces are on elided
 * lines.
 */
2858 __instr_dma_ht2_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2860 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2862 __instr_dma_ht_many_exec(p, t, ip, 2);
/* 3-block variant. */
2866 __instr_dma_ht3_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2868 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2870 __instr_dma_ht_many_exec(p, t, ip, 3);
/* 4-block variant. */
2874 __instr_dma_ht4_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2876 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2878 __instr_dma_ht_many_exec(p, t, ip, 4);
/* 5-block variant. */
2882 __instr_dma_ht5_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2884 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2886 __instr_dma_ht_many_exec(p, t, ip, 5);
/* 6-block variant. */
2890 __instr_dma_ht6_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2892 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2894 __instr_dma_ht_many_exec(p, t, ip, 6);
/* 7-block variant. */
2898 __instr_dma_ht7_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2900 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2902 __instr_dma_ht_many_exec(p, t, ip, 7);
/* 8-block variant. */
2906 __instr_dma_ht8_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2908 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2910 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU add variants.  Suffixes (from the TRACE tags): (mh)/(hm)/(hh) mix
 * metadata (host byte order) and header (network byte order) operands;
 * (mi)/(hi) add an immediate to a metadata/header field.  The actual ALU*
 * macro invocations are on elided lines -- this extract shows only the
 * signatures and traces.
 * NOTE(review): lossy -- return types, "struct thread *t" parameters, the
 * ALU macro calls and braces are missing.
 */
2917 __instr_alu_add_exec(
struct rte_swx_pipeline *p __rte_unused,
2919 const struct instruction *ip)
2921 TRACE(
"[Thread %2u] add\n", p->thread_id);
2927 __instr_alu_add_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2929 const struct instruction *ip)
2931 TRACE(
"[Thread %2u] add (mh)\n", p->thread_id);
2937 __instr_alu_add_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2939 const struct instruction *ip)
2941 TRACE(
"[Thread %2u] add (hm)\n", p->thread_id);
2947 __instr_alu_add_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
2949 const struct instruction *ip)
2951 TRACE(
"[Thread %2u] add (hh)\n", p->thread_id);
2957 __instr_alu_add_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
2959 const struct instruction *ip)
2961 TRACE(
"[Thread %2u] add (mi)\n", p->thread_id);
2967 __instr_alu_add_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
2969 const struct instruction *ip)
2971 TRACE(
"[Thread %2u] add (hi)\n", p->thread_id);
/*
 * ALU sub variants -- same operand-suffix scheme as the add family above:
 * (mh)/(hm)/(hh) mix metadata and header operands, (mi)/(hi) subtract an
 * immediate.  ALU* macro invocations are on elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters, the macro calls and braces are missing.
 */
2977 __instr_alu_sub_exec(
struct rte_swx_pipeline *p __rte_unused,
2979 const struct instruction *ip)
2981 TRACE(
"[Thread %2u] sub\n", p->thread_id);
2987 __instr_alu_sub_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
2989 const struct instruction *ip)
2991 TRACE(
"[Thread %2u] sub (mh)\n", p->thread_id);
2997 __instr_alu_sub_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
2999 const struct instruction *ip)
3001 TRACE(
"[Thread %2u] sub (hm)\n", p->thread_id);
3007 __instr_alu_sub_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3009 const struct instruction *ip)
3011 TRACE(
"[Thread %2u] sub (hh)\n", p->thread_id);
3017 __instr_alu_sub_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
3019 const struct instruction *ip)
3021 TRACE(
"[Thread %2u] sub (mi)\n", p->thread_id);
3027 __instr_alu_sub_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
3029 const struct instruction *ip)
3031 TRACE(
"[Thread %2u] sub (hi)\n", p->thread_id);
/*
 * ALU shift-left variants -- same operand-suffix scheme as the add family.
 * ALU* macro invocations are on elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters, the macro calls and braces are missing.
 */
3037 __instr_alu_shl_exec(
struct rte_swx_pipeline *p __rte_unused,
3039 const struct instruction *ip)
3041 TRACE(
"[Thread %2u] shl\n", p->thread_id);
3047 __instr_alu_shl_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3049 const struct instruction *ip)
3051 TRACE(
"[Thread %2u] shl (mh)\n", p->thread_id);
3057 __instr_alu_shl_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3059 const struct instruction *ip)
3061 TRACE(
"[Thread %2u] shl (hm)\n", p->thread_id);
3067 __instr_alu_shl_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3069 const struct instruction *ip)
3071 TRACE(
"[Thread %2u] shl (hh)\n", p->thread_id);
3077 __instr_alu_shl_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
3079 const struct instruction *ip)
3081 TRACE(
"[Thread %2u] shl (mi)\n", p->thread_id);
3087 __instr_alu_shl_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
3089 const struct instruction *ip)
3091 TRACE(
"[Thread %2u] shl (hi)\n", p->thread_id);
/*
 * ALU shift-right variants -- same operand-suffix scheme as the add family.
 * ALU* macro invocations are on elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters, the macro calls and braces are missing.
 */
3097 __instr_alu_shr_exec(
struct rte_swx_pipeline *p __rte_unused,
3099 const struct instruction *ip)
3101 TRACE(
"[Thread %2u] shr\n", p->thread_id);
3107 __instr_alu_shr_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3109 const struct instruction *ip)
3111 TRACE(
"[Thread %2u] shr (mh)\n", p->thread_id);
3117 __instr_alu_shr_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3119 const struct instruction *ip)
3121 TRACE(
"[Thread %2u] shr (hm)\n", p->thread_id);
3127 __instr_alu_shr_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3129 const struct instruction *ip)
3131 TRACE(
"[Thread %2u] shr (hh)\n", p->thread_id);
3137 __instr_alu_shr_mi_exec(
struct rte_swx_pipeline *p __rte_unused,
3139 const struct instruction *ip)
3141 TRACE(
"[Thread %2u] shr (mi)\n", p->thread_id);
3148 __instr_alu_shr_hi_exec(
struct rte_swx_pipeline *p __rte_unused,
3150 const struct instruction *ip)
3152 TRACE(
"[Thread %2u] shr (hi)\n", p->thread_id);
/*
 * ALU bitwise-AND variants.  The (hm)/(hh) forms use the ALU_HM_FAST /
 * ALU_HH_FAST macros with the & operator (visible below); the other forms'
 * macro calls are on elided lines.  (i) applies an immediate mask.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters and braces are missing.
 */
3158 __instr_alu_and_exec(
struct rte_swx_pipeline *p __rte_unused,
3160 const struct instruction *ip)
3162 TRACE(
"[Thread %2u] and\n", p->thread_id);
3168 __instr_alu_and_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3170 const struct instruction *ip)
3172 TRACE(
"[Thread %2u] and (mh)\n", p->thread_id);
3178 __instr_alu_and_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3180 const struct instruction *ip)
3182 TRACE(
"[Thread %2u] and (hm)\n", p->thread_id);
/* Fast path: bitwise ops need no byte-order conversion of the operands. */
3184 ALU_HM_FAST(t, ip, &);
3188 __instr_alu_and_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3190 const struct instruction *ip)
3192 TRACE(
"[Thread %2u] and (hh)\n", p->thread_id);
3194 ALU_HH_FAST(t, ip, &);
3198 __instr_alu_and_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3200 const struct instruction *ip)
3202 TRACE(
"[Thread %2u] and (i)\n", p->thread_id);
/*
 * ALU bitwise-OR variants -- mirror of the AND family above; (hm)/(hh) use
 * the FAST macros with the | operator, other forms' macro calls are on
 * elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters and braces are missing.
 */
3208 __instr_alu_or_exec(
struct rte_swx_pipeline *p __rte_unused,
3210 const struct instruction *ip)
3212 TRACE(
"[Thread %2u] or\n", p->thread_id);
3218 __instr_alu_or_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3220 const struct instruction *ip)
3222 TRACE(
"[Thread %2u] or (mh)\n", p->thread_id);
3228 __instr_alu_or_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3230 const struct instruction *ip)
3232 TRACE(
"[Thread %2u] or (hm)\n", p->thread_id);
3234 ALU_HM_FAST(t, ip, |);
3238 __instr_alu_or_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3240 const struct instruction *ip)
3242 TRACE(
"[Thread %2u] or (hh)\n", p->thread_id);
3244 ALU_HH_FAST(t, ip, |);
3248 __instr_alu_or_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3250 const struct instruction *ip)
3252 TRACE(
"[Thread %2u] or (i)\n", p->thread_id);
/*
 * ALU bitwise-XOR variants -- mirror of the AND/OR families; (hm)/(hh) use
 * the FAST macros with the ^ operator, other forms' macro calls are on
 * elided lines.
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters and braces are missing.
 */
3258 __instr_alu_xor_exec(
struct rte_swx_pipeline *p __rte_unused,
3260 const struct instruction *ip)
3262 TRACE(
"[Thread %2u] xor\n", p->thread_id);
3268 __instr_alu_xor_mh_exec(
struct rte_swx_pipeline *p __rte_unused,
3270 const struct instruction *ip)
3272 TRACE(
"[Thread %2u] xor (mh)\n", p->thread_id);
3278 __instr_alu_xor_hm_exec(
struct rte_swx_pipeline *p __rte_unused,
3280 const struct instruction *ip)
3282 TRACE(
"[Thread %2u] xor (hm)\n", p->thread_id);
3284 ALU_HM_FAST(t, ip, ^);
3288 __instr_alu_xor_hh_exec(
struct rte_swx_pipeline *p __rte_unused,
3290 const struct instruction *ip)
3292 TRACE(
"[Thread %2u] xor (hh)\n", p->thread_id);
3294 ALU_HH_FAST(t, ip, ^);
3298 __instr_alu_xor_i_exec(
struct rte_swx_pipeline *p __rte_unused,
3300 const struct instruction *ip)
3302 TRACE(
"[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Incremental Internet-checksum ADD of a source field into a 16-bit
 * checksum field (one's-complement arithmetic; see RFC 1071/1624).  The
 * masked source is folded 32 bits at a time, then the 16-bit carries are
 * folded repeatedly (r = (r & 0xFFFF) + (r >> 16)).
 * NOTE(review): lossy extract -- the "r" declaration/initialization from
 * the current checksum, the *src64_ptr load into src64, the final
 * complement/negation step and braces are all on elided lines.
 */
3308 __instr_alu_ckadd_field_exec(
struct rte_swx_pipeline *p __rte_unused,
3310 const struct instruction *ip)
3312 uint8_t *dst_struct, *src_struct;
3313 uint16_t *dst16_ptr, dst;
3314 uint64_t *src64_ptr, src64, src64_mask, src;
3317 TRACE(
"[Thread %2u] ckadd (field)\n", p->thread_id);
/* Destination: the 16-bit checksum field. */
3320 dst_struct = t->structs[ip->alu.dst.struct_id];
3321 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source field, masked to its declared width. */
3324 src_struct = t->structs[ip->alu.src.struct_id];
3325 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3327 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3328 src = src64 & src64_mask;
/* Fold the source into the accumulator 32 bits at a time. */
3338 r += (src >> 32) + (src & 0xFFFFFFFF);
/* Repeated 16-bit carry folds. */
3344 r = (r & 0xFFFF) + (r >> 16);
3349 r = (r & 0xFFFF) + (r >> 16);
3356 r = (r & 0xFFFF) + (r >> 16);
3362 *dst16_ptr = (uint16_t)r;
/*
 * Incremental Internet-checksum SUBTRACT of a source field from a 16-bit
 * checksum field (RFC 1624-style update).  A large bias (0xFFFF00000) is
 * added before subtracting so the one's-complement accumulator cannot go
 * negative; carries are then folded as in ckadd.
 * NOTE(review): lossy extract -- the "r" declaration/initialization, the
 * *src64_ptr load, the final complement step and braces are on elided
 * lines.
 */
3366 __instr_alu_cksub_field_exec(
struct rte_swx_pipeline *p __rte_unused,
3368 const struct instruction *ip)
3370 uint8_t *dst_struct, *src_struct;
3371 uint16_t *dst16_ptr, dst;
3372 uint64_t *src64_ptr, src64, src64_mask, src;
3375 TRACE(
"[Thread %2u] cksub (field)\n", p->thread_id);
3378 dst_struct = t->structs[ip->alu.dst.struct_id];
3379 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3382 src_struct = t->structs[ip->alu.src.struct_id];
3383 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3385 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3386 src = src64 & src64_mask;
/* Bias keeps the unsigned accumulator non-negative across the subtract. */
3404 r += 0xFFFF00000ULL;
3409 r -= (src >> 32) + (src & 0xFFFFFFFF);
/* Repeated 16-bit carry folds. */
3414 r = (r & 0xFFFF) + (r >> 16);
3419 r = (r & 0xFFFF) + (r >> 16);
3426 r = (r & 0xFFFF) + (r >> 16);
3432 *dst16_ptr = (uint16_t)r;
/*
 * Internet checksum over a fixed 20-byte struct -- the specialized fast
 * path for a standard (option-less) IPv4 header: five 32-bit words are
 * summed (partially on elided lines), carries folded, and 0 is mapped to
 * 0xFFFF per one's-complement convention before the 16-bit store.
 * NOTE(review): lossy extract -- "r0"/"r1" declarations, initial word
 * sums, the final complement and braces are on elided lines.
 */
3436 __instr_alu_ckadd_struct20_exec(
struct rte_swx_pipeline *p __rte_unused,
3438 const struct instruction *ip)
3440 uint8_t *dst_struct, *src_struct;
3441 uint16_t *dst16_ptr, dst;
3442 uint32_t *src32_ptr;
3445 TRACE(
"[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3448 dst_struct = t->structs[ip->alu.dst.struct_id];
3449 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3452 src_struct = t->structs[ip->alu.src.struct_id];
3453 src32_ptr = (uint32_t *)&src_struct[0];
/* Combine the two partial sums with the fifth 32-bit word. */
3463 r0 += r1 + src32_ptr[4];
/* Repeated 16-bit carry folds. */
3468 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3473 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3480 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* One's-complement convention: zero is represented as 0xFFFF. */
3484 r0 = r0 ? r0 : 0xFFFF;
3486 *dst16_ptr = (uint16_t)r0;
/*
 * Internet checksum over a whole header of run-time size.  The header ID is
 * smuggled in ip->alu.src.n_bits; a 20-byte header (plain IPv4) takes the
 * specialized struct20 fast path, otherwise the header is summed 32 bits at
 * a time and the carries folded.
 * NOTE(review): lossy extract -- "r"/"i" declarations, the loop body's
 * accumulation statement, the early return after the fast path, the final
 * complement and braces are on elided lines.
 */
3490 __instr_alu_ckadd_struct_exec(
struct rte_swx_pipeline *p __rte_unused,
3492 const struct instruction *ip)
/* src.n_bits is repurposed to carry the source header ID here. */
3494 uint32_t src_header_id = ip->alu.src.n_bits;
3495 uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3496 uint8_t *dst_struct, *src_struct;
3497 uint16_t *dst16_ptr, dst;
3498 uint32_t *src32_ptr;
/* Fast path for the common 20-byte (option-less IPv4) header. */
3502 if (n_src_header_bytes == 20) {
3503 __instr_alu_ckadd_struct20_exec(p, t, ip);
3507 TRACE(
"[Thread %2u] ckadd (struct)\n", p->thread_id);
3510 dst_struct = t->structs[ip->alu.dst.struct_id];
3511 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3514 src_struct = t->structs[ip->alu.src.struct_id];
3515 src32_ptr = (uint32_t *)&src_struct[0];
/* Sum the header as 32-bit words (accumulation statement elided). */
3525 for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
/* Repeated 16-bit carry folds. */
3531 r = (r & 0xFFFF) + (r >> 16);
3536 r = (r & 0xFFFF) + (r >> 16);
3543 r = (r & 0xFFFF) + (r >> 16);
3549 *dst16_ptr = (uint16_t)r;
/*
 * Register-array addressing/operand helpers shared by the regprefetch /
 * regrd / regwr / regadd instruction handlers below.
 *
 * Naming: hbo = host byte order (metadata operands), nbo = network byte
 * order (header operands).  On big-endian builds the nbo variants are
 * #defined to the hbo ones (the #if RTE_BYTE_ORDER blocks are partially
 * visible below, with #else/#endif lines elided).
 *
 * NOTE(review): lossy extract -- several return statements, braces and the
 * preprocessor #else/#endif lines are missing; some physical lines contain
 * two fused original lines (e.g. a #define followed by the next function's
 * first line).
 */
3555 static inline uint64_t *
/* Resolve the instruction's register array base pointer. */
3556 instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3558 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3562 static inline uint64_t
/* Array index taken from a host-byte-order (metadata) field, masked to the
 * field width and wrapped to the array size (r->size_mask). */
3563 instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3565 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3567 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3568 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3569 uint64_t idx64 = *idx64_ptr;
3570 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3571 uint64_t idx = idx64 & idx64_mask & r->size_mask;
/* Array index taken from a network-byte-order (header) field: byte-swap,
 * right-align to the field width, wrap to the array size.  LE builds only;
 * BE builds alias this to the hbo variant via the #define below. */
3576 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3578 static inline uint64_t
3579 instr_regarray_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3581 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3583 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3584 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3585 uint64_t idx64 = *idx64_ptr;
3586 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
/* Immediate array index, wrapped to the array size. */
3593 #define instr_regarray_idx_nbo instr_regarray_idx_hbo 3597 static inline uint64_t
3598 instr_regarray_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3600 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3602 uint64_t idx = ip->regarray.idx_val & r->size_mask;
3607 static inline uint64_t
/* Source operand from a host-byte-order field, masked to its width. */
3608 instr_regarray_src_hbo(
struct thread *t,
const struct instruction *ip)
3610 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3611 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3612 uint64_t src64 = *src64_ptr;
3613 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3614 uint64_t src = src64 & src64_mask;
/* Source operand from a network-byte-order field (LE builds; BE builds
 * alias to the hbo variant via the #define below). */
3619 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3621 static inline uint64_t
3622 instr_regarray_src_nbo(
struct thread *t,
const struct instruction *ip)
3624 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3625 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3626 uint64_t src64 = *src64_ptr;
3627 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
/* Store src into a host-byte-order destination field, read-modify-write so
 * only n_bits of the destination word are touched. */
3634 #define instr_regarray_src_nbo instr_regarray_src_hbo 3639 instr_regarray_dst_hbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3641 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3642 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3643 uint64_t dst64 = *dst64_ptr;
3644 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3646 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/* Store src into a network-byte-order destination field: byte-swap and
 * left-align before the masked read-modify-write (LE builds only). */
3650 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3653 instr_regarray_dst_nbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3655 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3656 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3657 uint64_t dst64 = *dst64_ptr;
3658 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3660 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3661 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/*
 * regprefetch handlers: resolve the register-array element address so the
 * cache line can be prefetched ahead of the actual read/write.  Index
 * source per suffix: r[h] = header field (nbo), r[m] = metadata field
 * (hbo), r[i] = immediate.  The prefetch call itself is on elided lines.
 * The fused #define at the top aliases the nbo dst setter to the hbo one on
 * big-endian builds (tail of the previous #if block).
 * NOTE(review): lossy extract -- return types, "struct thread *t"
 * parameters (except the ri variant), the prefetch statements and braces
 * are missing.
 */
3666 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set 3671 __instr_regprefetch_rh_exec(
struct rte_swx_pipeline *p,
3673 const struct instruction *ip)
3675 uint64_t *regarray, idx;
3677 TRACE(
"[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3679 regarray = instr_regarray_regarray(p, ip);
3680 idx = instr_regarray_idx_nbo(p, t, ip);
/* Metadata-indexed variant. */
3685 __instr_regprefetch_rm_exec(
struct rte_swx_pipeline *p,
3687 const struct instruction *ip)
3689 uint64_t *regarray, idx;
3691 TRACE(
"[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3693 regarray = instr_regarray_regarray(p, ip);
3694 idx = instr_regarray_idx_hbo(p, t, ip);
/* Immediate-indexed variant. */
3699 __instr_regprefetch_ri_exec(
struct rte_swx_pipeline *p,
3700 struct thread *t __rte_unused,
3701 const struct instruction *ip)
3703 uint64_t *regarray, idx;
3705 TRACE(
"[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3707 regarray = instr_regarray_regarray(p, ip);
3708 idx = instr_regarray_idx_imm(p, ip);
/*
 * regrd handlers: read regarray[idx] into a destination field.  Suffix
 * encodes destination and index source: first letter h/m = header/metadata
 * destination, trailing h/m/i = header/metadata/immediate index.  Header
 * destinations go through the nbo setter (byte-swapping), metadata
 * destinations through the hbo setter.
 * NOTE(review): lossy extract -- return types, some "struct thread *t"
 * parameter lines and all braces are missing.
 */
3713 __instr_regrd_hrh_exec(
struct rte_swx_pipeline *p,
3715 const struct instruction *ip)
3717 uint64_t *regarray, idx;
3719 TRACE(
"[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3721 regarray = instr_regarray_regarray(p, ip);
3722 idx = instr_regarray_idx_nbo(p, t, ip);
3723 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* h = r[m]: header destination, metadata index. */
3727 __instr_regrd_hrm_exec(
struct rte_swx_pipeline *p,
3729 const struct instruction *ip)
3731 uint64_t *regarray, idx;
3733 TRACE(
"[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3736 regarray = instr_regarray_regarray(p, ip);
3737 idx = instr_regarray_idx_hbo(p, t, ip);
3738 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[h]: metadata destination, header index. */
3742 __instr_regrd_mrh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3744 uint64_t *regarray, idx;
3746 TRACE(
"[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3748 regarray = instr_regarray_regarray(p, ip);
3749 idx = instr_regarray_idx_nbo(p, t, ip);
3750 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[m]: metadata destination, metadata index. */
3754 __instr_regrd_mrm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3756 uint64_t *regarray, idx;
3758 TRACE(
"[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3760 regarray = instr_regarray_regarray(p, ip);
3761 idx = instr_regarray_idx_hbo(p, t, ip);
3762 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
/* h = r[i]: header destination, immediate index. */
3766 __instr_regrd_hri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3768 uint64_t *regarray, idx;
3770 TRACE(
"[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3772 regarray = instr_regarray_regarray(p, ip);
3773 idx = instr_regarray_idx_imm(p, ip);
3774 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
/* m = r[i]: metadata destination, immediate index. */
3778 __instr_regrd_mri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3780 uint64_t *regarray, idx;
3782 TRACE(
"[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3784 regarray = instr_regarray_regarray(p, ip);
3785 idx = instr_regarray_idx_imm(p, ip);
3786 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3790 __instr_regwr_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3792 uint64_t *regarray, idx, src;
3794 TRACE(
"[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3796 regarray = instr_regarray_regarray(p, ip);
3797 idx = instr_regarray_idx_nbo(p, t, ip);
3798 src = instr_regarray_src_nbo(t, ip);
3799 regarray[idx] = src;
3803 __instr_regwr_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3805 uint64_t *regarray, idx, src;
3807 TRACE(
"[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3809 regarray = instr_regarray_regarray(p, ip);
3810 idx = instr_regarray_idx_nbo(p, t, ip);
3811 src = instr_regarray_src_hbo(t, ip);
3812 regarray[idx] = src;
3816 __instr_regwr_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3818 uint64_t *regarray, idx, src;
3820 TRACE(
"[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3822 regarray = instr_regarray_regarray(p, ip);
3823 idx = instr_regarray_idx_hbo(p, t, ip);
3824 src = instr_regarray_src_nbo(t, ip);
3825 regarray[idx] = src;
3829 __instr_regwr_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3831 uint64_t *regarray, idx, src;
3833 TRACE(
"[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3835 regarray = instr_regarray_regarray(p, ip);
3836 idx = instr_regarray_idx_hbo(p, t, ip);
3837 src = instr_regarray_src_hbo(t, ip);
3838 regarray[idx] = src;
3842 __instr_regwr_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3844 uint64_t *regarray, idx, src;
3846 TRACE(
"[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3848 regarray = instr_regarray_regarray(p, ip);
3849 idx = instr_regarray_idx_nbo(p, t, ip);
3850 src = ip->regarray.dstsrc_val;
3851 regarray[idx] = src;
3855 __instr_regwr_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3857 uint64_t *regarray, idx, src;
3859 TRACE(
"[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3861 regarray = instr_regarray_regarray(p, ip);
3862 idx = instr_regarray_idx_hbo(p, t, ip);
3863 src = ip->regarray.dstsrc_val;
3864 regarray[idx] = src;
3868 __instr_regwr_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3870 uint64_t *regarray, idx, src;
3872 TRACE(
"[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3874 regarray = instr_regarray_regarray(p, ip);
3875 idx = instr_regarray_idx_imm(p, ip);
3876 src = instr_regarray_src_nbo(t, ip);
3877 regarray[idx] = src;
3881 __instr_regwr_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3883 uint64_t *regarray, idx, src;
3885 TRACE(
"[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3887 regarray = instr_regarray_regarray(p, ip);
3888 idx = instr_regarray_idx_imm(p, ip);
3889 src = instr_regarray_src_hbo(t, ip);
3890 regarray[idx] = src;
3894 __instr_regwr_rii_exec(
struct rte_swx_pipeline *p,
3895 struct thread *t __rte_unused,
3896 const struct instruction *ip)
3898 uint64_t *regarray, idx, src;
3900 TRACE(
"[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3902 regarray = instr_regarray_regarray(p, ip);
3903 idx = instr_regarray_idx_imm(p, ip);
3904 src = ip->regarray.dstsrc_val;
3905 regarray[idx] = src;
3909 __instr_regadd_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3911 uint64_t *regarray, idx, src;
3913 TRACE(
"[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3915 regarray = instr_regarray_regarray(p, ip);
3916 idx = instr_regarray_idx_nbo(p, t, ip);
3917 src = instr_regarray_src_nbo(t, ip);
3918 regarray[idx] += src;
3922 __instr_regadd_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3924 uint64_t *regarray, idx, src;
3926 TRACE(
"[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3928 regarray = instr_regarray_regarray(p, ip);
3929 idx = instr_regarray_idx_nbo(p, t, ip);
3930 src = instr_regarray_src_hbo(t, ip);
3931 regarray[idx] += src;
3935 __instr_regadd_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3937 uint64_t *regarray, idx, src;
3939 TRACE(
"[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3941 regarray = instr_regarray_regarray(p, ip);
3942 idx = instr_regarray_idx_hbo(p, t, ip);
3943 src = instr_regarray_src_nbo(t, ip);
3944 regarray[idx] += src;
3948 __instr_regadd_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3950 uint64_t *regarray, idx, src;
3952 TRACE(
"[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3954 regarray = instr_regarray_regarray(p, ip);
3955 idx = instr_regarray_idx_hbo(p, t, ip);
3956 src = instr_regarray_src_hbo(t, ip);
3957 regarray[idx] += src;
3961 __instr_regadd_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3963 uint64_t *regarray, idx, src;
3965 TRACE(
"[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3967 regarray = instr_regarray_regarray(p, ip);
3968 idx = instr_regarray_idx_nbo(p, t, ip);
3969 src = ip->regarray.dstsrc_val;
3970 regarray[idx] += src;
3974 __instr_regadd_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3976 uint64_t *regarray, idx, src;
3978 TRACE(
"[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3980 regarray = instr_regarray_regarray(p, ip);
3981 idx = instr_regarray_idx_hbo(p, t, ip);
3982 src = ip->regarray.dstsrc_val;
3983 regarray[idx] += src;
3987 __instr_regadd_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3989 uint64_t *regarray, idx, src;
3991 TRACE(
"[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3993 regarray = instr_regarray_regarray(p, ip);
3994 idx = instr_regarray_idx_imm(p, ip);
3995 src = instr_regarray_src_nbo(t, ip);
3996 regarray[idx] += src;
4000 __instr_regadd_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4002 uint64_t *regarray, idx, src;
4004 TRACE(
"[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
4006 regarray = instr_regarray_regarray(p, ip);
4007 idx = instr_regarray_idx_imm(p, ip);
4008 src = instr_regarray_src_hbo(t, ip);
4009 regarray[idx] += src;
4013 __instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
4014 struct thread *t __rte_unused,
4015 const struct instruction *ip)
4017 uint64_t *regarray, idx, src;
4019 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
4021 regarray = instr_regarray_regarray(p, ip);
4022 idx = instr_regarray_idx_imm(p, ip);
4023 src = ip->regarray.dstsrc_val;
4024 regarray[idx] += src;
4030 static inline struct meter *
4031 instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4033 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4035 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
4036 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
4037 uint64_t idx64 = *idx64_ptr;
4038 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
4039 uint64_t idx = idx64 & idx64_mask & r->size_mask;
4041 return &r->metarray[idx];
4044 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 4046 static inline struct meter *
4047 instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4049 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4051 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
4052 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
4053 uint64_t idx64 = *idx64_ptr;
4054 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
4056 return &r->metarray[idx];
4061 #define instr_meter_idx_nbo instr_meter_idx_hbo 4065 static inline struct meter *
4066 instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
4068 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
4070 uint64_t idx = ip->meter.idx_val & r->size_mask;
4072 return &r->metarray[idx];
4075 static inline uint32_t
4076 instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
4078 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
4079 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
4080 uint64_t src64 = *src64_ptr;
4081 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
4082 uint64_t src = src64 & src64_mask;
4084 return (uint32_t)src;
4087 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 4089 static inline uint32_t
4090 instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
4092 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
4093 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
4094 uint64_t src64 = *src64_ptr;
4095 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
4097 return (uint32_t)src;
4102 #define instr_meter_length_nbo instr_meter_length_hbo 4107 instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
4109 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
4110 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
4111 uint64_t src64 = *src64_ptr;
4112 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
4113 uint64_t src = src64 & src64_mask;
4119 instr_meter_color_out_hbo_set(
struct thread *t,
4120 const struct instruction *ip,
4123 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
4124 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
4125 uint64_t dst64 = *dst64_ptr;
4126 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
4128 uint64_t src = (uint64_t)color_out;
4130 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
4134 __instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
4136 const struct instruction *ip)
4140 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
4142 m = instr_meter_idx_nbo(p, t, ip);
4147 __instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
4149 const struct instruction *ip)
4153 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
4155 m = instr_meter_idx_hbo(p, t, ip);
4160 __instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
4161 struct thread *t __rte_unused,
4162 const struct instruction *ip)
4166 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
4168 m = instr_meter_idx_imm(p, ip);
4173 __instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4176 uint64_t time, n_pkts, n_bytes;
4180 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
4182 m = instr_meter_idx_nbo(p, t, ip);
4185 length = instr_meter_length_nbo(t, ip);
4186 color_in = instr_meter_color_in_hbo(t, ip);
4189 &m->profile->profile,
4194 color_out &= m->color_mask;
4196 n_pkts = m->n_pkts[color_out];
4197 n_bytes = m->n_bytes[color_out];
4199 instr_meter_color_out_hbo_set(t, ip, color_out);
4201 m->n_pkts[color_out] = n_pkts + 1;
4202 m->n_bytes[color_out] = n_bytes + length;
4206 __instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4209 uint64_t time, n_pkts, n_bytes;
4213 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
4215 m = instr_meter_idx_nbo(p, t, ip);
4218 length = instr_meter_length_nbo(t, ip);
4219 color_in = (
enum rte_color)ip->meter.color_in_val;
4222 &m->profile->profile,
4227 color_out &= m->color_mask;
4229 n_pkts = m->n_pkts[color_out];
4230 n_bytes = m->n_bytes[color_out];
4232 instr_meter_color_out_hbo_set(t, ip, color_out);
4234 m->n_pkts[color_out] = n_pkts + 1;
4235 m->n_bytes[color_out] = n_bytes + length;
4239 __instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4242 uint64_t time, n_pkts, n_bytes;
4246 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
4248 m = instr_meter_idx_nbo(p, t, ip);
4251 length = instr_meter_length_hbo(t, ip);
4252 color_in = instr_meter_color_in_hbo(t, ip);
4255 &m->profile->profile,
4260 color_out &= m->color_mask;
4262 n_pkts = m->n_pkts[color_out];
4263 n_bytes = m->n_bytes[color_out];
4265 instr_meter_color_out_hbo_set(t, ip, color_out);
4267 m->n_pkts[color_out] = n_pkts + 1;
4268 m->n_bytes[color_out] = n_bytes + length;
4272 __instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4275 uint64_t time, n_pkts, n_bytes;
4279 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
4281 m = instr_meter_idx_nbo(p, t, ip);
4284 length = instr_meter_length_hbo(t, ip);
4285 color_in = (
enum rte_color)ip->meter.color_in_val;
4288 &m->profile->profile,
4293 color_out &= m->color_mask;
4295 n_pkts = m->n_pkts[color_out];
4296 n_bytes = m->n_bytes[color_out];
4298 instr_meter_color_out_hbo_set(t, ip, color_out);
4300 m->n_pkts[color_out] = n_pkts + 1;
4301 m->n_bytes[color_out] = n_bytes + length;
4305 __instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4308 uint64_t time, n_pkts, n_bytes;
4312 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
4314 m = instr_meter_idx_hbo(p, t, ip);
4317 length = instr_meter_length_nbo(t, ip);
4318 color_in = instr_meter_color_in_hbo(t, ip);
4321 &m->profile->profile,
4326 color_out &= m->color_mask;
4328 n_pkts = m->n_pkts[color_out];
4329 n_bytes = m->n_bytes[color_out];
4331 instr_meter_color_out_hbo_set(t, ip, color_out);
4333 m->n_pkts[color_out] = n_pkts + 1;
4334 m->n_bytes[color_out] = n_bytes + length;
4338 __instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4341 uint64_t time, n_pkts, n_bytes;
4345 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
4347 m = instr_meter_idx_hbo(p, t, ip);
4350 length = instr_meter_length_nbo(t, ip);
4351 color_in = (
enum rte_color)ip->meter.color_in_val;
4354 &m->profile->profile,
4359 color_out &= m->color_mask;
4361 n_pkts = m->n_pkts[color_out];
4362 n_bytes = m->n_bytes[color_out];
4364 instr_meter_color_out_hbo_set(t, ip, color_out);
4366 m->n_pkts[color_out] = n_pkts + 1;
4367 m->n_bytes[color_out] = n_bytes + length;
4371 __instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4374 uint64_t time, n_pkts, n_bytes;
4378 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
4380 m = instr_meter_idx_hbo(p, t, ip);
4383 length = instr_meter_length_hbo(t, ip);
4384 color_in = instr_meter_color_in_hbo(t, ip);
4387 &m->profile->profile,
4392 color_out &= m->color_mask;
4394 n_pkts = m->n_pkts[color_out];
4395 n_bytes = m->n_bytes[color_out];
4397 instr_meter_color_out_hbo_set(t, ip, color_out);
4399 m->n_pkts[color_out] = n_pkts + 1;
4400 m->n_bytes[color_out] = n_bytes + length;
4404 __instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4407 uint64_t time, n_pkts, n_bytes;
4411 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
4413 m = instr_meter_idx_hbo(p, t, ip);
4416 length = instr_meter_length_hbo(t, ip);
4417 color_in = (
enum rte_color)ip->meter.color_in_val;
4420 &m->profile->profile,
4425 color_out &= m->color_mask;
4427 n_pkts = m->n_pkts[color_out];
4428 n_bytes = m->n_bytes[color_out];
4430 instr_meter_color_out_hbo_set(t, ip, color_out);
4432 m->n_pkts[color_out] = n_pkts + 1;
4433 m->n_bytes[color_out] = n_bytes + length;
4437 __instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4440 uint64_t time, n_pkts, n_bytes;
4444 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
4446 m = instr_meter_idx_imm(p, ip);
4449 length = instr_meter_length_nbo(t, ip);
4450 color_in = instr_meter_color_in_hbo(t, ip);
4453 &m->profile->profile,
4458 color_out &= m->color_mask;
4460 n_pkts = m->n_pkts[color_out];
4461 n_bytes = m->n_bytes[color_out];
4463 instr_meter_color_out_hbo_set(t, ip, color_out);
4465 m->n_pkts[color_out] = n_pkts + 1;
4466 m->n_bytes[color_out] = n_bytes + length;
4470 __instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4473 uint64_t time, n_pkts, n_bytes;
4477 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
4479 m = instr_meter_idx_imm(p, ip);
4482 length = instr_meter_length_nbo(t, ip);
4483 color_in = (
enum rte_color)ip->meter.color_in_val;
4486 &m->profile->profile,
4491 color_out &= m->color_mask;
4493 n_pkts = m->n_pkts[color_out];
4494 n_bytes = m->n_bytes[color_out];
4496 instr_meter_color_out_hbo_set(t, ip, color_out);
4498 m->n_pkts[color_out] = n_pkts + 1;
4499 m->n_bytes[color_out] = n_bytes + length;
4503 __instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4506 uint64_t time, n_pkts, n_bytes;
4510 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
4512 m = instr_meter_idx_imm(p, ip);
4515 length = instr_meter_length_hbo(t, ip);
4516 color_in = instr_meter_color_in_hbo(t, ip);
4519 &m->profile->profile,
4524 color_out &= m->color_mask;
4526 n_pkts = m->n_pkts[color_out];
4527 n_bytes = m->n_bytes[color_out];
4529 instr_meter_color_out_hbo_set(t, ip, color_out);
4531 m->n_pkts[color_out] = n_pkts + 1;
4532 m->n_bytes[color_out] = n_bytes + length;
4536 __instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4539 uint64_t time, n_pkts, n_bytes;
4543 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
4545 m = instr_meter_idx_imm(p, ip);
4548 length = instr_meter_length_hbo(t, ip);
4549 color_in = (
enum rte_color)ip->meter.color_in_val;
4552 &m->profile->profile,
4557 color_out &= m->color_mask;
4559 n_pkts = m->n_pkts[color_out];
4560 n_bytes = m->n_bytes[color_out];
4562 instr_meter_color_out_hbo_set(t, ip, color_out);
4564 m->n_pkts[color_out] = n_pkts + 1;
4565 m->n_bytes[color_out] = n_bytes + length;
uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, size_t *entry_id, int *hit)
int(* rte_swx_extern_func_t)(void *mailbox)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
__rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
static uint32_t rte_bsf32(uint32_t v)
void(* rte_swx_port_out_flush_t)(void *port)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
static uint64_t rte_get_tsc_cycles(void)
void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
__rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
#define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX
#define RTE_SWX_NAME_SIZE
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
static void rte_prefetch0(const volatile void *p)
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)