4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 27 #define TRACE(...) printf(__VA_ARGS__) 35 #define ntoh64(x) rte_be_to_cpu_64(x) 36 #define hton64(x) rte_cpu_to_be_64(x) 49 TAILQ_ENTRY(struct_type) node;
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
116 struct mirroring_session {
119 uint32_t truncation_length;
125 struct extern_type_member_func {
126 TAILQ_ENTRY(extern_type_member_func) node;
132 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
135 TAILQ_ENTRY(extern_type) node;
137 struct struct_type *mailbox_struct_type;
140 struct extern_type_member_func_tailq funcs;
144 TAILQ_HEAD(extern_type_tailq, extern_type);
147 TAILQ_ENTRY(extern_obj) node;
149 struct extern_type *type;
155 TAILQ_HEAD(extern_obj_tailq, extern_obj);
157 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 158 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8 161 struct extern_obj_runtime {
171 TAILQ_ENTRY(extern_func) node;
173 struct struct_type *mailbox_struct_type;
179 TAILQ_HEAD(extern_func_tailq, extern_func);
181 struct extern_func_runtime {
190 TAILQ_ENTRY(hash_func) node;
196 TAILQ_HEAD(hash_func_tailq, hash_func);
198 struct hash_func_runtime {
206 TAILQ_ENTRY(header) node;
208 struct struct_type *st;
213 TAILQ_HEAD(header_tailq, header);
215 struct header_runtime {
220 struct header_out_runtime {
246 enum instruction_type {
304 INSTR_HDR_INVALIDATE,
354 INSTR_ALU_CKADD_FIELD,
355 INSTR_ALU_CKADD_STRUCT20,
356 INSTR_ALU_CKADD_STRUCT,
362 INSTR_ALU_CKSUB_FIELD,
420 INSTR_REGPREFETCH_RH,
421 INSTR_REGPREFETCH_RM,
422 INSTR_REGPREFETCH_RI,
500 INSTR_LEARNER_REARM_NEW,
503 INSTR_LEARNER_FORGET,
548 INSTR_JMP_ACTION_HIT,
553 INSTR_JMP_ACTION_MISS,
606 struct instr_operand {
627 uint8_t header_id[8];
628 uint8_t struct_id[8];
633 struct instr_hdr_validity {
644 uint8_t mf_first_arg_offset;
645 uint8_t mf_timeout_id_offset;
646 uint8_t mf_timeout_id_n_bits;
649 struct instr_extern_obj {
654 struct instr_extern_func {
658 struct instr_hash_func {
659 uint8_t hash_func_id;
673 struct instr_dst_src {
674 struct instr_operand dst;
676 struct instr_operand src;
681 struct instr_regarray {
686 struct instr_operand idx;
691 struct instr_operand dstsrc;
701 struct instr_operand idx;
705 struct instr_operand length;
708 struct instr_operand color_in;
709 uint32_t color_in_val;
712 struct instr_operand color_out;
717 uint8_t header_id[8];
718 uint8_t struct_id[8];
729 struct instruction *ip;
732 struct instr_operand a;
738 struct instr_operand b;
744 enum instruction_type type;
747 struct instr_dst_src mirror;
748 struct instr_hdr_validity valid;
749 struct instr_dst_src mov;
750 struct instr_regarray regarray;
751 struct instr_meter meter;
752 struct instr_dma dma;
753 struct instr_dst_src alu;
754 struct instr_table table;
755 struct instr_learn learn;
756 struct instr_extern_obj ext_obj;
757 struct instr_extern_func ext_func;
758 struct instr_hash_func hash_func;
759 struct instr_jmp jmp;
763 struct instruction_data {
770 typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
776 (*action_func_t)(
struct rte_swx_pipeline *p);
779 TAILQ_ENTRY(action) node;
781 struct struct_type *st;
782 int *args_endianness;
783 struct instruction *instructions;
784 struct instruction_data *instruction_data;
785 uint32_t n_instructions;
789 TAILQ_HEAD(action_tailq, action);
795 TAILQ_ENTRY(table_type) node;
801 TAILQ_HEAD(table_type_tailq, table_type);
809 TAILQ_ENTRY(table) node;
812 struct table_type *type;
815 struct match_field *fields;
817 struct header *header;
820 struct action **actions;
821 struct action *default_action;
822 uint8_t *default_action_data;
824 int default_action_is_const;
825 uint32_t action_data_size_max;
826 int *action_is_for_table_entries;
827 int *action_is_for_default_entry;
833 TAILQ_HEAD(table_tailq, table);
835 struct table_runtime {
841 struct table_statistics {
842 uint64_t n_pkts_hit[2];
843 uint64_t *n_pkts_action;
850 TAILQ_ENTRY(selector) node;
853 struct field *group_id_field;
854 struct field **selector_fields;
855 uint32_t n_selector_fields;
856 struct header *selector_header;
857 struct field *member_id_field;
859 uint32_t n_groups_max;
860 uint32_t n_members_per_group_max;
865 TAILQ_HEAD(selector_tailq, selector);
867 struct selector_runtime {
869 uint8_t **group_id_buffer;
870 uint8_t **selector_buffer;
871 uint8_t **member_id_buffer;
874 struct selector_statistics {
882 TAILQ_ENTRY(learner) node;
886 struct field **fields;
888 struct header *header;
891 struct action **actions;
892 struct action *default_action;
893 uint8_t *default_action_data;
895 int default_action_is_const;
896 uint32_t action_data_size_max;
897 int *action_is_for_table_entries;
898 int *action_is_for_default_entry;
906 TAILQ_HEAD(learner_tailq, learner);
908 struct learner_runtime {
913 struct learner_statistics {
914 uint64_t n_pkts_hit[2];
915 uint64_t n_pkts_learn[2];
916 uint64_t n_pkts_rearm;
917 uint64_t n_pkts_forget;
918 uint64_t *n_pkts_action;
925 TAILQ_ENTRY(regarray) node;
932 TAILQ_HEAD(regarray_tailq, regarray);
934 struct regarray_runtime {
942 struct meter_profile {
943 TAILQ_ENTRY(meter_profile) node;
946 struct rte_meter_trtcm_profile profile;
950 TAILQ_HEAD(meter_profile_tailq, meter_profile);
953 TAILQ_ENTRY(metarray) node;
959 TAILQ_HEAD(metarray_tailq, metarray);
963 struct meter_profile *profile;
971 struct metarray_runtime {
972 struct meter *metarray;
983 uint32_t *mirroring_slots;
984 uint64_t mirroring_slots_mask;
986 uint32_t recirc_pass_id;
992 struct header_runtime *headers;
993 struct header_out_runtime *headers_out;
994 uint8_t *header_storage;
995 uint8_t *header_out_storage;
996 uint64_t valid_headers;
997 uint32_t n_headers_out;
1003 struct table_runtime *tables;
1004 struct selector_runtime *selectors;
1005 struct learner_runtime *learners;
1009 uint32_t learner_id;
1013 struct extern_obj_runtime *extern_objs;
1014 struct extern_func_runtime *extern_funcs;
1017 struct instruction *ip;
1018 struct instruction *ret;
/*
 * 64-bit bit-mask helpers: test, set, and clear bit *pos* of *mask*.
 * The 1LLU literal keeps the shift in unsigned 64-bit arithmetic.
 */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

/* Non-zero when header *header_id* is currently valid for *thread*. */
#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)

/*
 * Read instruction operand *x* from thread *t* and return its value in host
 * byte order: load 64 bits at the operand's struct/offset location and keep
 * only the n_bits least significant bits.
 * NOTE(review): assumes 1 <= x->n_bits <= 64 — a shift by 64 would be UB;
 * presumably guaranteed by the instruction builder, confirm at call sites.
 */
static inline uint64_t
instr_operand_hbo(struct thread *t, const struct instr_operand *x)
{
	uint8_t *x_struct = t->structs[x->struct_id];
	uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
	uint64_t x64 = *x64_ptr;
	uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);

	return x64 & x64_mask;
}
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/*
 * Read instruction operand *x* (stored in network byte order) and return its
 * value in host byte order: byte-swap the 64-bit load, then shift right so
 * that only the operand's n_bits least significant bits remain.
 * NOTE(review): assumes 1 <= x->n_bits <= 64 — a shift by 64 would be UB.
 */
static inline uint64_t
instr_operand_nbo(struct thread *t, const struct instr_operand *x)
{
	uint8_t *x_struct = t->structs[x->struct_id];
	uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
	uint64_t x64 = *x64_ptr;

	return ntoh64(x64) >> (64 - x->n_bits);
}
1053 #define instr_operand_nbo instr_operand_hbo 1057 #define ALU(thread, ip, operator) \ 1059 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1060 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1061 uint64_t dst64 = *dst64_ptr; \ 1062 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1063 uint64_t dst = dst64 & dst64_mask; \ 1065 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1066 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1067 uint64_t src64 = *src64_ptr; \ 1068 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1069 uint64_t src = src64 & src64_mask; \ 1071 uint64_t result = dst operator src; \ 1073 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1076 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1078 #define ALU_MH(thread, ip, operator) \ 1080 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1081 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1082 uint64_t dst64 = *dst64_ptr; \ 1083 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1084 uint64_t dst = dst64 & dst64_mask; \ 1086 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1087 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1088 uint64_t src64 = *src64_ptr; \ 1089 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1091 uint64_t result = dst operator src; \ 1093 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1096 #define ALU_HM(thread, ip, operator) \ 1098 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1099 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1100 uint64_t dst64 = *dst64_ptr; \ 1101 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1102 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1104 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1105 
uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1106 uint64_t src64 = *src64_ptr; \ 1107 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1108 uint64_t src = src64 & src64_mask; \ 1110 uint64_t result = dst operator src; \ 1111 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1113 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1116 #define ALU_HM_FAST(thread, ip, operator) \ 1118 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1119 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1120 uint64_t dst64 = *dst64_ptr; \ 1121 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1122 uint64_t dst = dst64 & dst64_mask; \ 1124 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1125 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1126 uint64_t src64 = *src64_ptr; \ 1127 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1128 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \ 1130 uint64_t result = dst operator src; \ 1132 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1135 #define ALU_HH(thread, ip, operator) \ 1137 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1138 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1139 uint64_t dst64 = *dst64_ptr; \ 1140 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1141 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1143 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1144 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1145 uint64_t src64 = *src64_ptr; \ 1146 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1148 uint64_t result = dst operator src; \ 1149 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1151 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1154 #define ALU_HH_FAST(thread, ip, operator) \ 1156 
uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1157 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1158 uint64_t dst64 = *dst64_ptr; \ 1159 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1160 uint64_t dst = dst64 & dst64_mask; \ 1162 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1163 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1164 uint64_t src64 = *src64_ptr; \ 1165 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \ 1167 uint64_t result = dst operator src; \ 1169 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1176 #define ALU_HM_FAST ALU 1178 #define ALU_HH_FAST ALU 1182 #define ALU_I(thread, ip, operator) \ 1184 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1185 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1186 uint64_t dst64 = *dst64_ptr; \ 1187 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1188 uint64_t dst = dst64 & dst64_mask; \ 1190 uint64_t src = (ip)->alu.src_val; \ 1192 uint64_t result = dst operator src; \ 1194 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1197 #define ALU_MI ALU_I 1199 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1201 #define ALU_HI(thread, ip, operator) \ 1203 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1204 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1205 uint64_t dst64 = *dst64_ptr; \ 1206 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1207 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1209 uint64_t src = (ip)->alu.src_val; \ 1211 uint64_t result = dst operator src; \ 1212 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1214 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1219 #define ALU_HI ALU_I 1223 #define MOV(thread, ip) \ 1225 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1226 
uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1227 uint64_t dst64 = *dst64_ptr; \ 1228 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1230 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1231 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1232 uint64_t src64 = *src64_ptr; \ 1233 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1234 uint64_t src = src64 & src64_mask; \ 1236 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1239 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1241 #define MOV_MH(thread, ip) \ 1243 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1244 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1245 uint64_t dst64 = *dst64_ptr; \ 1246 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1248 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1249 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1250 uint64_t src64 = *src64_ptr; \ 1251 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \ 1253 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1256 #define MOV_HM(thread, ip) \ 1258 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1259 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1260 uint64_t dst64 = *dst64_ptr; \ 1261 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1263 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1264 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1265 uint64_t src64 = *src64_ptr; \ 1266 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1267 uint64_t src = src64 & src64_mask; \ 1269 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \ 1270 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1273 #define MOV_HH(thread, ip) \ 1275 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 
1276 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1277 uint64_t dst64 = *dst64_ptr; \ 1278 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1280 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1281 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1282 uint64_t src64 = *src64_ptr; \ 1284 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \ 1285 src = src >> (64 - (ip)->mov.dst.n_bits); \ 1286 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1297 #define MOV_I(thread, ip) \ 1299 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1300 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1301 uint64_t dst64 = *dst64_ptr; \ 1302 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1304 uint64_t src = (ip)->mov.src_val; \ 1306 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1309 #define JMP_CMP(thread, ip, operator) \ 1311 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1312 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1313 uint64_t a64 = *a64_ptr; \ 1314 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1315 uint64_t a = a64 & a64_mask; \ 1317 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1318 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1319 uint64_t b64 = *b64_ptr; \ 1320 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1321 uint64_t b = b64 & b64_mask; \ 1323 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1326 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1328 #define JMP_CMP_MH(thread, ip, operator) \ 1330 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1331 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1332 uint64_t a64 = *a64_ptr; \ 1333 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1334 uint64_t a = a64 & a64_mask; \ 1336 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1337 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1338 uint64_t b64 = *b64_ptr; \ 1339 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1341 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1344 #define JMP_CMP_HM(thread, ip, operator) \ 1346 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1347 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1348 uint64_t a64 = *a64_ptr; \ 1349 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1351 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1352 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1353 uint64_t b64 = *b64_ptr; \ 1354 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1355 uint64_t b = b64 & b64_mask; \ 1357 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1360 #define JMP_CMP_HH(thread, ip, operator) \ 1362 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1363 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1364 uint64_t a64 = *a64_ptr; \ 1365 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1367 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1368 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1369 uint64_t b64 = *b64_ptr; \ 1370 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1372 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1375 #define JMP_CMP_HH_FAST(thread, ip, operator) \ 1377 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1378 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1379 uint64_t a64 = *a64_ptr; \ 1380 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \ 1382 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1383 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1384 uint64_t b64 = *b64_ptr; \ 1385 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \ 1387 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1392 #define JMP_CMP_MH JMP_CMP 1393 #define JMP_CMP_HM JMP_CMP 1394 #define JMP_CMP_HH JMP_CMP 1395 #define JMP_CMP_HH_FAST JMP_CMP 1399 #define JMP_CMP_I(thread, ip, operator) \ 1401 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1402 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1403 uint64_t a64 = *a64_ptr; \ 1404 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1405 uint64_t a = a64 & a64_mask; \ 1407 uint64_t b = (ip)->jmp.b_val; \ 1409 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1412 #define JMP_CMP_MI JMP_CMP_I 1414 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1416 #define JMP_CMP_HI(thread, ip, operator) \ 1418 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1419 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1420 uint64_t a64 = *a64_ptr; \ 1421 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1423 uint64_t b = (ip)->jmp.b_val; \ 1425 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1430 #define JMP_CMP_HI JMP_CMP_I 1434 #define METADATA_READ(thread, offset, n_bits) \ 1436 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1437 uint64_t m64 = *m64_ptr; \ 1438 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1442 #define METADATA_WRITE(thread, offset, n_bits, value) \ 1444 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1445 uint64_t m64 = *m64_ptr; \ 1446 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1448 uint64_t m_new = value; \ 1450 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \ 1453 #ifndef RTE_SWX_PIPELINE_THREADS_MAX 1454 #define RTE_SWX_PIPELINE_THREADS_MAX 16 1457 #ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1458 #define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 256 1461 struct rte_swx_pipeline {
1462 struct struct_type_tailq struct_types;
1463 struct port_in_type_tailq port_in_types;
1464 struct port_in_tailq ports_in;
1465 struct port_out_type_tailq port_out_types;
1466 struct port_out_tailq ports_out;
1467 struct extern_type_tailq extern_types;
1468 struct extern_obj_tailq extern_objs;
1469 struct extern_func_tailq extern_funcs;
1470 struct hash_func_tailq hash_funcs;
1471 struct header_tailq headers;
1472 struct struct_type *metadata_st;
1473 uint32_t metadata_struct_id;
1474 struct action_tailq actions;
1475 struct table_type_tailq table_types;
1476 struct table_tailq tables;
1477 struct selector_tailq selectors;
1478 struct learner_tailq learners;
1479 struct regarray_tailq regarrays;
1480 struct meter_profile_tailq meter_profiles;
1481 struct metarray_tailq metarrays;
1483 struct port_in_runtime *in;
1484 struct port_out_runtime *out;
1485 struct mirroring_session *mirroring_sessions;
1486 struct instruction **action_instructions;
1487 action_func_t *action_funcs;
1489 struct table_statistics *table_stats;
1490 struct selector_statistics *selector_stats;
1491 struct learner_statistics *learner_stats;
1492 struct hash_func_runtime *hash_func_runtime;
1493 struct regarray_runtime *regarray_runtime;
1494 struct metarray_runtime *metarray_runtime;
1495 struct instruction *instructions;
1496 struct instruction_data *instruction_data;
1497 instr_exec_t *instruction_table;
1498 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1502 uint32_t n_ports_in;
1503 uint32_t n_ports_out;
1504 uint32_t n_mirroring_slots;
1505 uint32_t n_mirroring_sessions;
1506 uint32_t n_extern_objs;
1507 uint32_t n_extern_funcs;
1508 uint32_t n_hash_funcs;
1511 uint32_t n_selectors;
1512 uint32_t n_learners;
1513 uint32_t n_regarrays;
1514 uint32_t n_metarrays;
1518 uint32_t n_instructions;
/*
 * Advance the pipeline's input port index to the next port, wrapping around.
 * The AND implements the modulo; this relies on n_ports_in being a power of
 * two — presumably enforced at pipeline build time, confirm in the builder.
 */
static inline void
pipeline_port_inc(struct rte_swx_pipeline *p)
{
	p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
}
/* Point thread *t*'s instruction pointer at the pipeline's first instruction. */
static inline void
thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
{
	t->ip = p->instructions;
}
1539 thread_ip_set(
struct thread *t,
struct instruction *ip)
1545 thread_ip_action_call(
struct rte_swx_pipeline *p,
1550 t->ip = p->action_instructions[action_id];
1554 thread_ip_inc(
struct rte_swx_pipeline *p);
1557 thread_ip_inc(
struct rte_swx_pipeline *p)
1559 struct thread *t = &p->threads[p->thread_id];
1565 thread_ip_inc_cond(
struct thread *t,
int cond)
/*
 * Hand execution to the next thread, round-robin. The AND implements the
 * wrap-around modulo and relies on RTE_SWX_PIPELINE_THREADS_MAX being a
 * power of two (it defaults to 16 above).
 */
static inline void
thread_yield(struct rte_swx_pipeline *p)
{
	p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}
/*
 * Conditionally hand execution to the next thread: advances by *cond*
 * positions, so the branch-free contract is cond == 0 (stay) or cond == 1
 * (yield) — NOTE(review): other values would skip threads, confirm callers
 * only pass 0/1.
 */
static inline void
thread_yield_cond(struct rte_swx_pipeline *p, int cond)
{
	p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}
1586 __instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1588 struct port_in_runtime *port = &p->in[p->port_id];
1593 if (t->recirculate) {
1594 TRACE(
"[Thread %2u] rx - recirculate (pass %u)\n",
1596 t->recirc_pass_id + 1);
1600 t->mirroring_slots_mask = 0;
1602 t->recirc_pass_id++;
1605 t->valid_headers = 0;
1606 t->n_headers_out = 0;
1609 t->table_state = p->table_state;
1615 pkt_received = port->pkt_rx(port->obj, pkt);
1619 TRACE(
"[Thread %2u] rx %s from port %u\n",
1621 pkt_received ?
"1 pkt" :
"0 pkts",
1624 t->mirroring_slots_mask = 0;
1625 t->recirc_pass_id = 0;
1628 t->valid_headers = 0;
1629 t->n_headers_out = 0;
1632 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1635 t->table_state = p->table_state;
1638 pipeline_port_inc(p);
1640 return pkt_received;
1644 instr_rx_exec(
struct rte_swx_pipeline *p)
1646 struct thread *t = &p->threads[p->thread_id];
1647 struct instruction *ip = t->ip;
1651 pkt_received = __instr_rx_exec(p, t, ip);
1654 thread_ip_inc_cond(t, pkt_received);
1662 emit_handler(
struct thread *t)
1664 struct header_out_runtime *h0 = &t->headers_out[0];
1665 struct header_out_runtime *h1 = &t->headers_out[1];
1666 uint32_t offset = 0, i;
1669 if ((t->n_headers_out == 1) &&
1670 (h0->ptr + h0->n_bytes == t->ptr)) {
1671 TRACE(
"Emit handler: no header change or header decap.\n");
1673 t->pkt.offset -= h0->n_bytes;
1674 t->pkt.length += h0->n_bytes;
1680 if ((t->n_headers_out == 2) &&
1681 (h1->ptr + h1->n_bytes == t->ptr) &&
1682 (h0->ptr == h0->ptr0)) {
1685 TRACE(
"Emit handler: header encapsulation.\n");
1687 offset = h0->n_bytes + h1->n_bytes;
1688 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1689 t->pkt.offset -= offset;
1690 t->pkt.length += offset;
1696 TRACE(
"Emit handler: complex case.\n");
1698 for (i = 0; i < t->n_headers_out; i++) {
1699 struct header_out_runtime *h = &t->headers_out[i];
1701 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1702 offset += h->n_bytes;
1706 memcpy(t->ptr - offset, t->header_out_storage, offset);
1707 t->pkt.offset -= offset;
1708 t->pkt.length += offset;
1713 mirroring_handler(
struct rte_swx_pipeline *p,
struct thread *t,
struct rte_swx_pkt *pkt)
1715 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1718 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1719 if (slot_mask & slots_mask) {
1720 struct port_out_runtime *port;
1721 struct mirroring_session *session;
1722 uint32_t port_id, session_id;
1724 session_id = t->mirroring_slots[slot_id];
1725 session = &p->mirroring_sessions[session_id];
1727 port_id = session->port_id;
1728 port = &p->out[port_id];
1730 if (session->fast_clone)
1731 port->pkt_fast_clone_tx(port->obj, pkt);
1733 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
1735 slots_mask &= ~slot_mask;
1740 __instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1742 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1743 struct port_out_runtime *port = &p->out[port_id];
1747 if (t->recirculate) {
1748 TRACE(
"[Thread %2u]: tx 1 pkt - recirculate\n",
1755 mirroring_handler(p, t, pkt);
1760 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1768 mirroring_handler(p, t, pkt);
1769 port->pkt_tx(port->obj, pkt);
1773 __instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1775 uint64_t port_id = ip->io.io.val;
1776 struct port_out_runtime *port = &p->out[port_id];
1780 if (t->recirculate) {
1781 TRACE(
"[Thread %2u]: tx (i) 1 pkt - recirculate\n",
1788 mirroring_handler(p, t, pkt);
1793 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1801 mirroring_handler(p, t, pkt);
1802 port->pkt_tx(port->obj, pkt);
1806 __instr_drop_exec(
struct rte_swx_pipeline *p,
1810 uint64_t port_id = p->n_ports_out - 1;
1811 struct port_out_runtime *port = &p->out[port_id];
1814 TRACE(
"[Thread %2u]: drop 1 pkt\n",
1821 mirroring_handler(p, t, pkt);
1822 port->pkt_tx(port->obj, pkt);
1826 __instr_mirror_exec(
struct rte_swx_pipeline *p,
1828 const struct instruction *ip)
1830 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1831 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
1833 slot_id &= p->n_mirroring_slots - 1;
1834 session_id &= p->n_mirroring_sessions - 1;
1836 TRACE(
"[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1839 (uint32_t)session_id);
1841 t->mirroring_slots[slot_id] = session_id;
1842 t->mirroring_slots_mask |= 1LLU << slot_id;
1846 __instr_recirculate_exec(
struct rte_swx_pipeline *p __rte_unused,
1848 const struct instruction *ip __rte_unused)
1850 TRACE(
"[Thread %2u]: recirculate\n",
1857 __instr_recircid_exec(
struct rte_swx_pipeline *p __rte_unused,
1859 const struct instruction *ip)
1861 TRACE(
"[Thread %2u]: recircid (pass %u)\n",
1866 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
1873 __instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p __rte_unused,
1875 const struct instruction *ip,
1878 uint64_t valid_headers = t->valid_headers;
1879 uint8_t *ptr = t->ptr;
1880 uint32_t offset = t->pkt.offset;
1881 uint32_t
length = t->pkt.length;
1884 for (i = 0; i < n_extract; i++) {
1885 uint32_t header_id = ip->io.hdr.header_id[i];
1886 uint32_t struct_id = ip->io.hdr.struct_id[i];
1887 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1889 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
1895 t->structs[struct_id] = ptr;
1896 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1905 t->valid_headers = valid_headers;
1914 __instr_hdr_extract_exec(
struct rte_swx_pipeline *p,
1916 const struct instruction *ip)
1918 __instr_hdr_extract_many_exec(p, t, ip, 1);
1922 __instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
1924 const struct instruction *ip)
1926 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1928 __instr_hdr_extract_many_exec(p, t, ip, 2);
1932 __instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
1934 const struct instruction *ip)
1936 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1938 __instr_hdr_extract_many_exec(p, t, ip, 3);
1942 __instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
1944 const struct instruction *ip)
1946 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1948 __instr_hdr_extract_many_exec(p, t, ip, 4);
1952 __instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
1954 const struct instruction *ip)
1956 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1958 __instr_hdr_extract_many_exec(p, t, ip, 5);
1962 __instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
1964 const struct instruction *ip)
1966 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1968 __instr_hdr_extract_many_exec(p, t, ip, 6);
1972 __instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
1974 const struct instruction *ip)
1976 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1978 __instr_hdr_extract_many_exec(p, t, ip, 7);
1982 __instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
1984 const struct instruction *ip)
1986 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1988 __instr_hdr_extract_many_exec(p, t, ip, 8);
1992 __instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p __rte_unused,
1994 const struct instruction *ip)
1996 uint64_t valid_headers = t->valid_headers;
1997 uint8_t *ptr = t->ptr;
1998 uint32_t offset = t->pkt.offset;
1999 uint32_t length = t->pkt.length;
2001 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2002 uint32_t header_id = ip->io.hdr.header_id[0];
2003 uint32_t struct_id = ip->io.hdr.struct_id[0];
2004 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
2006 struct header_runtime *h = &t->headers[header_id];
2008 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
2014 n_bytes += n_bytes_last;
2017 t->structs[struct_id] = ptr;
2018 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2019 h->n_bytes = n_bytes;
2022 t->pkt.offset = offset + n_bytes;
2023 t->pkt.length = length - n_bytes;
2024 t->ptr = ptr + n_bytes;
/*
 * Look-ahead: map a header onto the current packet position WITHOUT
 * consuming any packet bytes (t->pkt.offset/length/ptr are untouched),
 * so the parser can inspect fields before deciding how to proceed.
 */
static inline void
__instr_hdr_lookahead_exec(struct rte_swx_pipeline *p __rte_unused,
			   struct thread *t,
			   const struct instruction *ip)
{
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;

	uint32_t header_id = ip->io.hdr.header_id[0];
	uint32_t struct_id = ip->io.hdr.struct_id[0];

	TRACE("[Thread %2u]: lookahead header %u\n",
	      p->thread_id,
	      header_id);

	/* Headers: point the header at the packet buffer and mark it valid. */
	t->structs[struct_id] = ptr;
	t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
}
2051 __instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p __rte_unused,
2053 const struct instruction *ip,
2056 uint64_t valid_headers = t->valid_headers;
2057 uint32_t n_headers_out = t->n_headers_out;
2058 struct header_out_runtime *ho = NULL;
2059 uint8_t *ho_ptr = NULL;
2060 uint32_t ho_nbytes = 0, i;
2062 for (i = 0; i < n_emit; i++) {
2063 uint32_t header_id = ip->io.hdr.header_id[i];
2064 uint32_t struct_id = ip->io.hdr.struct_id[i];
2066 struct header_runtime *hi = &t->headers[header_id];
2067 uint8_t *hi_ptr0 = hi->ptr0;
2068 uint32_t n_bytes = hi->n_bytes;
2070 uint8_t *hi_ptr = t->structs[struct_id];
2072 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2073 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
2080 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
2086 if (!n_headers_out) {
2087 ho = &t->headers_out[0];
2093 ho_nbytes = n_bytes;
2099 ho = &t->headers_out[n_headers_out - 1];
2102 ho_nbytes = ho->n_bytes;
2106 if (ho_ptr + ho_nbytes == hi_ptr) {
2107 ho_nbytes += n_bytes;
2109 ho->n_bytes = ho_nbytes;
2116 ho_nbytes = n_bytes;
2123 ho->n_bytes = ho_nbytes;
2124 t->n_headers_out = n_headers_out;
/*
 * Single-header emit: delegate to the generic many-header handler with a
 * group size of one.
 */
static inline void
__instr_hdr_emit_exec(struct rte_swx_pipeline *p,
		      struct thread *t,
		      const struct instruction *ip)
{
	__instr_hdr_emit_many_exec(p, t, ip, 1);
}
/*
 * Fused emit + tx handlers: each handler implements N consecutive "emit"
 * instructions followed by one "tx" instruction, fused by the optimizer
 * into a single instruction (hence TRACE reports N + 1 fused instructions).
 */
static inline void
__instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 1);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 2);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 3);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 4);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 5);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 6);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 7);
	__instr_tx_exec(p, t, ip);
}

static inline void
__instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p,
			  struct thread *t,
			  const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);

	__instr_hdr_emit_many_exec(p, t, ip, 8);
	__instr_tx_exec(p, t, ip);
}
/*
 * Validate a header: mark it valid and point its struct at the default
 * header storage (h->ptr0).
 */
static inline void
__instr_hdr_validate_exec(struct rte_swx_pipeline *p __rte_unused,
			  struct thread *t,
			  const struct instruction *ip)
{
	uint32_t header_id = ip->valid.header_id;
	uint32_t struct_id = ip->valid.struct_id;
	uint64_t valid_headers = t->valid_headers;
	struct header_runtime *h = &t->headers[header_id];

	TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);

	/* If the header is already valid, its struct pointer may reference the
	 * packet buffer (extracted header); re-pointing it to h->ptr0 would be
	 * incorrect, so leave everything untouched.
	 */
	if (MASK64_BIT_GET(valid_headers, header_id))
		return;

	/* Headers. */
	t->structs[struct_id] = h->ptr0;
	t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
}
/*
 * Invalidate a header: clear its bit in the valid-headers mask. The struct
 * pointer is intentionally left as-is; it is only meaningful while the
 * valid bit is set.
 */
static inline void
__instr_hdr_invalidate_exec(struct rte_swx_pipeline *p __rte_unused,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint32_t header_id = ip->valid.header_id;

	TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);

	/* Headers. */
	t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
}
/*
 * Learn instruction: add an entry to the current learner table. The action
 * arguments are read from meta-data starting at mf_first_arg_offset and the
 * entry timeout ID is read from a meta-data field. Updates the learner's
 * learn statistics with the add status (0 = error, 1 = ok — per the
 * n_pkts_learn[status] indexing below; confirm against the table API).
 */
static inline void
__instr_learn_exec(struct rte_swx_pipeline *p,
		   struct thread *t,
		   const struct instruction *ip)
{
	uint64_t action_id = ip->learn.action_id;
	uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
	uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
		ip->learn.mf_timeout_id_n_bits);
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];
	uint32_t status;

	/* Table: add the entry using the match key stashed in the learner
	 * mailbox by the preceding lookup.
	 */
	status = rte_swx_table_learner_add(ts->obj,
					   l->mailbox,
					   t->time,
					   action_id,
					   &t->metadata[mf_first_arg_offset],
					   timeout_id);

	TRACE("[Thread %2u] learner %u learn %s\n",
	      p->thread_id,
	      learner_id,
	      status ?
	      "ok" :
	      "error");

	stats->n_pkts_learn[status] += 1;
}
/*
 * Rearm instruction: restart the expiration timer of the current learner
 * table entry, keeping its existing timeout profile.
 */
static inline void
__instr_rearm_exec(struct rte_swx_pipeline *p,
		   struct thread *t,
		   const struct instruction *ip __rte_unused)
{
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];

	/* Table. */
	rte_swx_table_learner_rearm(ts->obj, l->mailbox, t->time);

	TRACE("[Thread %2u] learner %u rearm\n",
	      p->thread_id,
	      learner_id);

	stats->n_pkts_rearm += 1;
}
/*
 * Rearm-new instruction: restart the expiration timer of the current
 * learner table entry with a NEW timeout profile, whose ID is read from a
 * meta-data field.
 */
static inline void
__instr_rearm_new_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
		ip->learn.mf_timeout_id_n_bits);
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];

	/* Table. */
	rte_swx_table_learner_rearm_new(ts->obj, l->mailbox, t->time, timeout_id);

	TRACE("[Thread %2u] learner %u rearm with timeout ID %u\n",
	      p->thread_id,
	      learner_id,
	      timeout_id);

	stats->n_pkts_rearm += 1;
}
/*
 * Forget instruction: delete the current entry from the learner table and
 * bump the forget counter.
 */
static inline void
__instr_forget_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip __rte_unused)
{
	uint32_t learner_id = t->learner_id;
	struct rte_swx_table_state *ts = &t->table_state[p->n_tables +
		p->n_selectors + learner_id];
	struct learner_runtime *l = &t->learners[learner_id];
	struct learner_statistics *stats = &p->learner_stats[learner_id];

	/* Table. */
	rte_swx_table_learner_delete(ts->obj, l->mailbox);

	TRACE("[Thread %2u] learner %u forget\n",
	      p->thread_id,
	      learner_id);

	stats->n_pkts_forget += 1;
}
/*
 * Extern object member function call.
 *
 * Returns non-zero when the member function completed, zero when it needs
 * to be resumed (thread yield) — convention inferred from the "done" name;
 * confirm against rte_swx_extern_type_member_func_t documentation.
 */
static inline uint32_t
__instr_extern_obj_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	uint32_t obj_id = ip->ext_obj.ext_obj_id;
	uint32_t func_id = ip->ext_obj.func_id;
	struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
	rte_swx_extern_type_member_func_t func = obj->funcs[func_id];
	uint32_t done;

	TRACE("[Thread %2u] extern obj %u member func %u\n",
	      p->thread_id,
	      obj_id,
	      func_id);

	/* Arguments are exchanged through the object's mailbox struct. */
	done = func(obj->obj, obj->mailbox);

	return done;
}
/*
 * Extern function call. Same completion convention as the extern object
 * member function call above.
 */
static inline uint32_t
__instr_extern_func_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip)
{
	uint32_t ext_func_id = ip->ext_func.ext_func_id;
	struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
	rte_swx_extern_func_t func = ext_func->func;
	uint32_t done;

	TRACE("[Thread %2u] extern func %u\n",
	      p->thread_id,
	      ext_func_id);

	/* Arguments are exchanged through the function's mailbox struct. */
	done = func(ext_func->mailbox);

	return done;
}
/*
 * Hash instruction: run the configured hash function over n_src_bytes of
 * the source struct and write the 32-bit result into a meta-data field
 * (dst_offset / n_dst_bits).
 */
static inline void
__instr_hash_func_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint32_t hash_func_id = ip->hash_func.hash_func_id;
	uint32_t dst_offset = ip->hash_func.dst.offset;
	uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
	uint32_t src_struct_id = ip->hash_func.src.struct_id;
	uint32_t src_offset = ip->hash_func.src.offset;
	uint32_t n_src_bytes = ip->hash_func.src.n_bytes;

	struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
	uint8_t *src_ptr = t->structs[src_struct_id];
	uint32_t result;

	TRACE("[Thread %2u] hash %u\n",
	      p->thread_id,
	      hash_func_id);

	/* Seed is fixed to 0. */
	result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
	METADATA_WRITE(t, dst_offset, n_dst_bits, result);
}
/*
 * Move (field copy) handlers. The suffix encodes the operand endianness:
 * no suffix = both operands host byte order (meta-data/action data),
 * mh = HBO dst / NBO src (header), hm = NBO dst / HBO src, hh = both NBO,
 * i = immediate source. The actual copy is performed by the MOV* macros
 * defined earlier in this file (bodies elided here — confirm upstream).
 */
static inline void
__instr_mov_exec(struct rte_swx_pipeline *p __rte_unused,
		 struct thread *t,
		 const struct instruction *ip)
{
	TRACE("[Thread %2u] mov\n", p->thread_id);

	MOV(t, ip);
}

static inline void
__instr_mov_mh_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (mh)\n", p->thread_id);

	MOV_MH(t, ip);
}

static inline void
__instr_mov_hm_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (hm)\n", p->thread_id);

	MOV_HM(t, ip);
}

static inline void
__instr_mov_hh_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] mov (hh)\n", p->thread_id);

	MOV_HH(t, ip);
}

static inline void
__instr_mov_i_exec(struct rte_swx_pipeline *p __rte_unused,
		   struct thread *t,
		   const struct instruction *ip)
{
	TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", p->thread_id, ip->mov.src_val);

	MOV_I(t, ip);
}
/*
 * DMA header <- table: copy n_dma blocks of action data (struct 0) into
 * headers, marking each destination header valid. When a destination
 * header is already valid, the copy goes to its current location
 * (possibly inside the packet buffer); otherwise to its default storage
 * (ptr0).
 */
static inline void
__instr_dma_ht_many_exec(struct rte_swx_pipeline *p __rte_unused,
			 struct thread *t,
			 const struct instruction *ip,
			 uint32_t n_dma)
{
	uint8_t *action_data = t->structs[0];
	uint64_t valid_headers = t->valid_headers;
	uint32_t i;

	for (i = 0; i < n_dma; i++) {
		uint32_t header_id = ip->dma.dst.header_id[i];
		uint32_t struct_id = ip->dma.dst.struct_id[i];
		uint32_t offset = ip->dma.src.offset[i];
		uint32_t n_bytes = ip->dma.n_bytes[i];

		struct header_runtime *h = &t->headers[header_id];
		uint8_t *h_ptr0 = h->ptr0;
		uint8_t *h_ptr = t->structs[struct_id];

		/* Valid header: overwrite in place; invalid: use default storage. */
		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
			h_ptr : h_ptr0;
		void *src = &action_data[offset];

		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);

		/* Headers. */
		memcpy(dst, src, n_bytes);
		t->structs[struct_id] = dst;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	}

	t->valid_headers = valid_headers;
}
/*
 * Fused DMA handlers: each implements N consecutive "dma" instructions
 * fused into one, delegating to the generic many-block routine.
 */
static inline void
__instr_dma_ht_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip)
{
	__instr_dma_ht_many_exec(p, t, ip, 1);
}

static inline void
__instr_dma_ht2_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 2);
}

static inline void
__instr_dma_ht3_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 3);
}

static inline void
__instr_dma_ht4_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 4);
}

static inline void
__instr_dma_ht5_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 5);
}

static inline void
__instr_dma_ht6_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 6);
}

static inline void
__instr_dma_ht7_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 7);
}

static inline void
__instr_dma_ht8_exec(struct rte_swx_pipeline *p,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);

	__instr_dma_ht_many_exec(p, t, ip, 8);
}
/*
 * ALU add handlers. The suffix encodes the operand byte order:
 * no suffix = dst and src both host byte order; mh = HBO dst / NBO src;
 * hm = NBO dst / HBO src; hh = both NBO; mi/hi = immediate src. The work
 * is done by the ALU* macros defined earlier in this file (invocations
 * elided in this extract — confirm upstream).
 */
static inline void
__instr_alu_add_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] add\n", p->thread_id);

	ALU(t, ip, +);
}

static inline void
__instr_alu_add_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (mh)\n", p->thread_id);

	ALU_MH(t, ip, +);
}

static inline void
__instr_alu_add_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hm)\n", p->thread_id);

	ALU_HM(t, ip, +);
}

static inline void
__instr_alu_add_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hh)\n", p->thread_id);

	ALU_HH(t, ip, +);
}

static inline void
__instr_alu_add_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (mi)\n", p->thread_id);

	ALU_MI(t, ip, +);
}

static inline void
__instr_alu_add_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] add (hi)\n", p->thread_id);

	ALU_HI(t, ip, +);
}
/* ALU subtract handlers; same operand-suffix convention as add above. */
static inline void
__instr_alu_sub_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] sub\n", p->thread_id);

	ALU(t, ip, -);
}

static inline void
__instr_alu_sub_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (mh)\n", p->thread_id);

	ALU_MH(t, ip, -);
}

static inline void
__instr_alu_sub_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hm)\n", p->thread_id);

	ALU_HM(t, ip, -);
}

static inline void
__instr_alu_sub_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hh)\n", p->thread_id);

	ALU_HH(t, ip, -);
}

static inline void
__instr_alu_sub_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (mi)\n", p->thread_id);

	ALU_MI(t, ip, -);
}

static inline void
__instr_alu_sub_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] sub (hi)\n", p->thread_id);

	ALU_HI(t, ip, -);
}
/* ALU shift-left handlers; same operand-suffix convention as add above. */
static inline void
__instr_alu_shl_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] shl\n", p->thread_id);

	ALU(t, ip, <<);
}

static inline void
__instr_alu_shl_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (mh)\n", p->thread_id);

	ALU_MH(t, ip, <<);
}

static inline void
__instr_alu_shl_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hm)\n", p->thread_id);

	ALU_HM(t, ip, <<);
}

static inline void
__instr_alu_shl_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hh)\n", p->thread_id);

	ALU_HH(t, ip, <<);
}

static inline void
__instr_alu_shl_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (mi)\n", p->thread_id);

	ALU_MI(t, ip, <<);
}

static inline void
__instr_alu_shl_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shl (hi)\n", p->thread_id);

	ALU_HI(t, ip, <<);
}
/* ALU shift-right handlers; same operand-suffix convention as add above. */
static inline void
__instr_alu_shr_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] shr\n", p->thread_id);

	ALU(t, ip, >>);
}

static inline void
__instr_alu_shr_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (mh)\n", p->thread_id);

	ALU_MH(t, ip, >>);
}

static inline void
__instr_alu_shr_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hm)\n", p->thread_id);

	ALU_HM(t, ip, >>);
}

static inline void
__instr_alu_shr_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hh)\n", p->thread_id);

	ALU_HH(t, ip, >>);
}

static inline void
__instr_alu_shr_mi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (mi)\n", p->thread_id);

	/* Immediate src is already in the correct byte order. */
	ALU_MI(t, ip, >>);
}

static inline void
__instr_alu_shr_hi_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] shr (hi)\n", p->thread_id);

	ALU_HI(t, ip, >>);
}
/*
 * ALU bitwise AND handlers. Bitwise operations are byte-order agnostic,
 * so the hm/hh variants can use the *_FAST macros (no endianness swap on
 * the operands) and a single _i variant covers all immediate cases.
 */
static inline void
__instr_alu_and_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] and\n", p->thread_id);

	ALU(t, ip, &);
}

static inline void
__instr_alu_and_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (mh)\n", p->thread_id);

	ALU_MH(t, ip, &);
}

static inline void
__instr_alu_and_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (hm)\n", p->thread_id);

	/* Fast path: no byte swap needed for a pure bitwise op. */
	ALU_HM_FAST(t, ip, &);
}

static inline void
__instr_alu_and_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] and (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, &);
}

static inline void
__instr_alu_and_i_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] and (i)\n", p->thread_id);

	ALU_I(t, ip, &);
}
/* ALU bitwise OR handlers; same conventions as the AND family above. */
static inline void
__instr_alu_or_exec(struct rte_swx_pipeline *p __rte_unused,
		    struct thread *t,
		    const struct instruction *ip)
{
	TRACE("[Thread %2u] or\n", p->thread_id);

	ALU(t, ip, |);
}

static inline void
__instr_alu_or_mh_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (mh)\n", p->thread_id);

	ALU_MH(t, ip, |);
}

static inline void
__instr_alu_or_hm_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (hm)\n", p->thread_id);

	/* Fast path: no byte swap needed for a pure bitwise op. */
	ALU_HM_FAST(t, ip, |);
}

static inline void
__instr_alu_or_hh_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] or (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, |);
}

static inline void
__instr_alu_or_i_exec(struct rte_swx_pipeline *p __rte_unused,
		      struct thread *t,
		      const struct instruction *ip)
{
	TRACE("[Thread %2u] or (i)\n", p->thread_id);

	ALU_I(t, ip, |);
}
/* ALU bitwise XOR handlers; same conventions as the AND family above. */
static inline void
__instr_alu_xor_exec(struct rte_swx_pipeline *p __rte_unused,
		     struct thread *t,
		     const struct instruction *ip)
{
	TRACE("[Thread %2u] xor\n", p->thread_id);

	ALU(t, ip, ^);
}

static inline void
__instr_alu_xor_mh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (mh)\n", p->thread_id);

	ALU_MH(t, ip, ^);
}

static inline void
__instr_alu_xor_hm_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (hm)\n", p->thread_id);

	/* Fast path: no byte swap needed for a pure bitwise op. */
	ALU_HM_FAST(t, ip, ^);
}

static inline void
__instr_alu_xor_hh_exec(struct rte_swx_pipeline *p __rte_unused,
			struct thread *t,
			const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (hh)\n", p->thread_id);

	ALU_HH_FAST(t, ip, ^);
}

static inline void
__instr_alu_xor_i_exec(struct rte_swx_pipeline *p __rte_unused,
		       struct thread *t,
		       const struct instruction *ip)
{
	TRACE("[Thread %2u] xor (i)\n", p->thread_id);

	ALU_I(t, ip, ^);
}
/*
 * Incremental Internet checksum ADD (RFC 1624): fold a source field (up to
 * 64 bits, host byte order masked to n_bits) into the 16-bit one's
 * complement checksum stored at the destination.
 */
static inline void
__instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p __rte_unused,
			     struct thread *t,
			     const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* NOTE: UB if n_bits == 0; instruction translation guarantees n_bits >= 1. */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Start from the one's complement of the current checksum. */
	r = dst;
	r = ~r & 0xFFFF;

	/* Add the two 32-bit halves of src; worst case r is a 34-bit number. */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* Fold the carries back into 16 bits. Three folds are sufficient:
	 * after the first fold r <= 19 bits, after the second the carry is
	 * at most 3 bits, after the third no carry can remain.
	 */
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);

	/* One's complement of the result; map 0 to 0xFFFF
	 * (TODO confirm final canonicalization against upstream).
	 */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
/*
 * Incremental Internet checksum SUBTRACT (RFC 1624): remove a source field
 * contribution from the 16-bit one's complement checksum at the
 * destination.
 */
static inline void
__instr_alu_cksub_field_exec(struct rte_swx_pipeline *p __rte_unused,
			     struct thread *t,
			     const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	/* Start from the one's complement of the current checksum. */
	r = dst;
	r = ~r & 0xFFFF;

	/* Add a bias that is a multiple of 0xFFFF (neutral in one's
	 * complement arithmetic) so the following subtraction of the two
	 * 32-bit halves of src cannot underflow.
	 */
	r += 0xFFFF00000ULL;
	r -= (src >> 32) + (src & 0xFFFFFFFF);

	/* Fold the carries back into 16 bits (three folds suffice). */
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);

	/* One's complement of the result. */
	r = ~r & 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
/*
 * Full Internet checksum over a 20-byte structure (the common IPv4 header
 * without options), unrolled into five 32-bit word additions. The checksum
 * field inside the source is assumed zeroed by the caller — TODO confirm.
 */
static inline void
__instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p __rte_unused,
				struct thread *t,
				const struct instruction *ip)
{
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint32_t *src32_ptr;
	uint64_t r0, r1;

	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* Sum the five 32-bit words using two accumulators; r0 is at most a
	 * 35-bit number at the end, so no 64-bit overflow is possible.
	 */
	r0 = src32_ptr[0];
	r1 = src32_ptr[1];
	r0 += src32_ptr[2];
	r1 += src32_ptr[3];
	r0 += r1 + src32_ptr[4];

	/* Fold the carries back into 16 bits (three folds suffice). */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);
	r0 = (r0 & 0xFFFF) + (r0 >> 16);
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* One's complement; a zero checksum is transmitted as 0xFFFF. */
	r0 = ~r0 & 0xFFFF;
	r0 = r0 ? r0 : 0xFFFF;

	*dst16_ptr = (uint16_t)r0;
}
/*
 * Full Internet checksum over a variable-size header struct. The source
 * header ID is stashed in the src n_bits field by the translator; the
 * header size is read at run-time. The 20-byte case dispatches to the
 * unrolled fast path above.
 */
static inline void
__instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p __rte_unused,
			      struct thread *t,
			      const struct instruction *ip)
{
	uint32_t src_header_id = ip->alu.src.n_bits; /* Header ID, not a bit count. */
	uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint32_t *src32_ptr;
	uint64_t r;
	uint32_t i;

	if (n_src_header_bytes == 20) {
		__instr_alu_ckadd_struct20_exec(p, t, ip);
		return;
	}

	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* Sum all 32-bit words; header sizes are small enough that the 64-bit
	 * accumulator cannot overflow.
	 */
	r = 0;
	for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
		r += *src32_ptr;

	/* Fold the carries back into 16 bits (three folds suffice). */
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);
	r = (r & 0xFFFF) + (r >> 16);

	/* One's complement; a zero checksum is transmitted as 0xFFFF. */
	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;
}
3238 static inline uint64_t *
3239 instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3241 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3245 static inline uint64_t
3246 instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3248 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3250 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3251 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3252 uint64_t idx64 = *idx64_ptr;
3253 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3254 uint64_t idx = idx64 & idx64_mask & r->size_mask;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the register array index from a network-byte-order header field. */
static inline uint64_t
instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, const struct instruction *ip)
{
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
	uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
	uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
	uint64_t idx64 = *idx64_ptr;
	/* Byte-swap, shift the field down from the top bits, clip to size. */
	uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;

	return idx;
}

#else

/* Big endian host: network byte order is host byte order. */
#define instr_regarray_idx_nbo instr_regarray_idx_hbo

#endif

/* Register array index taken from an immediate value in the instruction. */
static inline uint64_t
instr_regarray_idx_imm(struct rte_swx_pipeline *p, const struct instruction *ip)
{
	struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
	uint64_t idx = ip->regarray.idx_val & r->size_mask;

	return idx;
}
/* Read the source operand from a host-byte-order struct field. */
static inline uint64_t
instr_regarray_src_hbo(struct thread *t, const struct instruction *ip)
{
	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
	uint64_t src64 = *src64_ptr;
	uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
	uint64_t src = src64 & src64_mask;

	return src;
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Read the source operand from a network-byte-order header field. */
static inline uint64_t
instr_regarray_src_nbo(struct thread *t, const struct instruction *ip)
{
	uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
	uint64_t src64 = *src64_ptr;
	/* Byte-swap and shift the field down from the top bits. */
	uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);

	return src;
}
#else

/* Big endian host: network byte order is host byte order. */
#define instr_regarray_src_nbo instr_regarray_src_hbo

#endif

/* Write src into a host-byte-order destination field (read-modify-write
 * of the containing 64-bit word, touching only the field's bits).
 */
static inline void
instr_regarray_dst_hbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
{
	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
	uint64_t dst64 = *dst64_ptr;
	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);

	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
}
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Write a host-byte-order src into a network-byte-order destination field:
 * swap src into NBO, align it to the field position, then read-modify-write
 * only the field's bits.
 */
static inline void
instr_regarray_dst_nbo_src_hbo_set(struct thread *t, const struct instruction *ip, uint64_t src)
{
	uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
	uint64_t dst64 = *dst64_ptr;
	uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);

	src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
}
#else

/* Big endian host: network byte order is host byte order. */
#define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set

#endif

/* regprefetch r[h]: prefetch the register indexed by a header (NBO) field. */
static inline void
__instr_regprefetch_rh_exec(struct rte_swx_pipeline *p,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	/* NOTE(review): elided statement reconstructed as a cache prefetch of
	 * the target register — confirm against upstream.
	 */
	rte_prefetch0(&regarray[idx]);
}
/* regprefetch r[m]: prefetch the register indexed by a meta-data (HBO) field. */
static inline void
__instr_regprefetch_rm_exec(struct rte_swx_pipeline *p,
			    struct thread *t,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	rte_prefetch0(&regarray[idx]);
}

/* regprefetch r[i]: prefetch the register indexed by an immediate. */
static inline void
__instr_regprefetch_ri_exec(struct rte_swx_pipeline *p,
			    struct thread *t __rte_unused,
			    const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	rte_prefetch0(&regarray[idx]);
}
/*
 * regrd handlers: read regarray[idx] into a destination field. Naming:
 * first letter = destination kind (h = header/NBO field, m = meta-data/HBO
 * field), then "r", then index source (h = header field, m = meta-data
 * field, i = immediate).
 */
static inline void
__instr_regrd_hrh_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}

static inline void
__instr_regrd_hrm_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id);

	/* Structs. */
	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}

static inline void
__instr_regrd_mrh_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}

static inline void
__instr_regrd_mrm_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[m])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}

static inline void
__instr_regrd_hri_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
}

static inline void
__instr_regrd_mri_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx;

	TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
}
/*
 * regwr handlers: write a source value into regarray[idx]. Naming: "r",
 * then index source (h/m/i as in regrd), then value source (h = header
 * field, m = meta-data field, i = immediate).
 */
static inline void
__instr_regwr_rhh_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rhm_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rmh_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rmm_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rhi_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}

static inline void
__instr_regwr_rmi_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}

static inline void
__instr_regwr_rih_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rim_exec(struct rte_swx_pipeline *p,
		       struct thread *t,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] = src;
}

static inline void
__instr_regwr_rii_exec(struct rte_swx_pipeline *p,
		       struct thread *t __rte_unused,
		       const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] = src;
}
/*
 * regadd handlers: add a source value into regarray[idx] (wrap-around
 * uint64_t addition). Same naming convention as regwr: "r", index source
 * (h/m/i), value source (h/m/i).
 */
static inline void
__instr_regadd_rhh_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;
}

static inline void
__instr_regadd_rhm_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;
}

static inline void
__instr_regadd_rmh_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;
}

static inline void
__instr_regadd_rmm_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;
}

static inline void
__instr_regadd_rhi_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_nbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] += src;
}

static inline void
__instr_regadd_rmi_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_hbo(p, t, ip);
	src = ip->regarray.dstsrc_val;
	regarray[idx] += src;
}

static inline void
__instr_regadd_rih_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_nbo(t, ip);
	regarray[idx] += src;
}

static inline void
__instr_regadd_rim_exec(struct rte_swx_pipeline *p,
			struct thread *t,
			const struct instruction *ip)
{
	uint64_t *regarray, idx, src;

	TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id);

	regarray = instr_regarray_regarray(p, ip);
	idx = instr_regarray_idx_imm(p, ip);
	src = instr_regarray_src_hbo(t, ip);
	regarray[idx] += src;
}
3696 __instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
3697 struct thread *t __rte_unused,
3698 const struct instruction *ip)
3700 uint64_t *regarray, idx, src;
3702 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3704 regarray = instr_regarray_regarray(p, ip);
3705 idx = instr_regarray_idx_imm(p, ip);
3706 src = ip->regarray.dstsrc_val;
3707 regarray[idx] += src;
3713 static inline struct meter *
3714 instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3716 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3718 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3719 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3720 uint64_t idx64 = *idx64_ptr;
3721 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3722 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3724 return &r->metarray[idx];
3727 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3729 static inline struct meter *
3730 instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3732 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3734 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3735 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3736 uint64_t idx64 = *idx64_ptr;
3737 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3739 return &r->metarray[idx];
3744 #define instr_meter_idx_nbo instr_meter_idx_hbo 3748 static inline struct meter *
3749 instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3751 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3753 uint64_t idx = ip->meter.idx_val & r->size_mask;
3755 return &r->metarray[idx];
3758 static inline uint32_t
3759 instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
3761 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3762 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3763 uint64_t src64 = *src64_ptr;
3764 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3765 uint64_t src = src64 & src64_mask;
3767 return (uint32_t)src;
3770 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3772 static inline uint32_t
3773 instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
3775 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3776 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3777 uint64_t src64 = *src64_ptr;
3778 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3780 return (uint32_t)src;
3785 #define instr_meter_length_nbo instr_meter_length_hbo 3790 instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
3792 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3793 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3794 uint64_t src64 = *src64_ptr;
3795 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3796 uint64_t src = src64 & src64_mask;
3802 instr_meter_color_out_hbo_set(
struct thread *t,
3803 const struct instruction *ip,
3806 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3807 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
3808 uint64_t dst64 = *dst64_ptr;
3809 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
3811 uint64_t src = (uint64_t)color_out;
3813 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3817 __instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
3819 const struct instruction *ip)
3823 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
3825 m = instr_meter_idx_nbo(p, t, ip);
3830 __instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
3832 const struct instruction *ip)
3836 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
3838 m = instr_meter_idx_hbo(p, t, ip);
3843 __instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
3844 struct thread *t __rte_unused,
3845 const struct instruction *ip)
3849 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
3851 m = instr_meter_idx_imm(p, ip);
3856 __instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3859 uint64_t time, n_pkts, n_bytes;
3863 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
3865 m = instr_meter_idx_nbo(p, t, ip);
3868 length = instr_meter_length_nbo(t, ip);
3869 color_in = instr_meter_color_in_hbo(t, ip);
3872 &m->profile->profile,
3877 color_out &= m->color_mask;
3879 n_pkts = m->n_pkts[color_out];
3880 n_bytes = m->n_bytes[color_out];
3882 instr_meter_color_out_hbo_set(t, ip, color_out);
3884 m->n_pkts[color_out] = n_pkts + 1;
3885 m->n_bytes[color_out] = n_bytes + length;
3889 __instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3892 uint64_t time, n_pkts, n_bytes;
3896 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
3898 m = instr_meter_idx_nbo(p, t, ip);
3901 length = instr_meter_length_nbo(t, ip);
3902 color_in = (
enum rte_color)ip->meter.color_in_val;
3905 &m->profile->profile,
3910 color_out &= m->color_mask;
3912 n_pkts = m->n_pkts[color_out];
3913 n_bytes = m->n_bytes[color_out];
3915 instr_meter_color_out_hbo_set(t, ip, color_out);
3917 m->n_pkts[color_out] = n_pkts + 1;
3918 m->n_bytes[color_out] = n_bytes + length;
3922 __instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3925 uint64_t time, n_pkts, n_bytes;
3929 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
3931 m = instr_meter_idx_nbo(p, t, ip);
3934 length = instr_meter_length_hbo(t, ip);
3935 color_in = instr_meter_color_in_hbo(t, ip);
3938 &m->profile->profile,
3943 color_out &= m->color_mask;
3945 n_pkts = m->n_pkts[color_out];
3946 n_bytes = m->n_bytes[color_out];
3948 instr_meter_color_out_hbo_set(t, ip, color_out);
3950 m->n_pkts[color_out] = n_pkts + 1;
3951 m->n_bytes[color_out] = n_bytes + length;
3955 __instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3958 uint64_t time, n_pkts, n_bytes;
3962 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
3964 m = instr_meter_idx_nbo(p, t, ip);
3967 length = instr_meter_length_hbo(t, ip);
3968 color_in = (
enum rte_color)ip->meter.color_in_val;
3971 &m->profile->profile,
3976 color_out &= m->color_mask;
3978 n_pkts = m->n_pkts[color_out];
3979 n_bytes = m->n_bytes[color_out];
3981 instr_meter_color_out_hbo_set(t, ip, color_out);
3983 m->n_pkts[color_out] = n_pkts + 1;
3984 m->n_bytes[color_out] = n_bytes + length;
3988 __instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3991 uint64_t time, n_pkts, n_bytes;
3995 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
3997 m = instr_meter_idx_hbo(p, t, ip);
4000 length = instr_meter_length_nbo(t, ip);
4001 color_in = instr_meter_color_in_hbo(t, ip);
4004 &m->profile->profile,
4009 color_out &= m->color_mask;
4011 n_pkts = m->n_pkts[color_out];
4012 n_bytes = m->n_bytes[color_out];
4014 instr_meter_color_out_hbo_set(t, ip, color_out);
4016 m->n_pkts[color_out] = n_pkts + 1;
4017 m->n_bytes[color_out] = n_bytes + length;
4021 __instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4024 uint64_t time, n_pkts, n_bytes;
4028 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
4030 m = instr_meter_idx_hbo(p, t, ip);
4033 length = instr_meter_length_nbo(t, ip);
4034 color_in = (
enum rte_color)ip->meter.color_in_val;
4037 &m->profile->profile,
4042 color_out &= m->color_mask;
4044 n_pkts = m->n_pkts[color_out];
4045 n_bytes = m->n_bytes[color_out];
4047 instr_meter_color_out_hbo_set(t, ip, color_out);
4049 m->n_pkts[color_out] = n_pkts + 1;
4050 m->n_bytes[color_out] = n_bytes + length;
4054 __instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4057 uint64_t time, n_pkts, n_bytes;
4061 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
4063 m = instr_meter_idx_hbo(p, t, ip);
4066 length = instr_meter_length_hbo(t, ip);
4067 color_in = instr_meter_color_in_hbo(t, ip);
4070 &m->profile->profile,
4075 color_out &= m->color_mask;
4077 n_pkts = m->n_pkts[color_out];
4078 n_bytes = m->n_bytes[color_out];
4080 instr_meter_color_out_hbo_set(t, ip, color_out);
4082 m->n_pkts[color_out] = n_pkts + 1;
4083 m->n_bytes[color_out] = n_bytes + length;
4087 __instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4090 uint64_t time, n_pkts, n_bytes;
4094 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
4096 m = instr_meter_idx_hbo(p, t, ip);
4099 length = instr_meter_length_hbo(t, ip);
4100 color_in = (
enum rte_color)ip->meter.color_in_val;
4103 &m->profile->profile,
4108 color_out &= m->color_mask;
4110 n_pkts = m->n_pkts[color_out];
4111 n_bytes = m->n_bytes[color_out];
4113 instr_meter_color_out_hbo_set(t, ip, color_out);
4115 m->n_pkts[color_out] = n_pkts + 1;
4116 m->n_bytes[color_out] = n_bytes + length;
4120 __instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4123 uint64_t time, n_pkts, n_bytes;
4127 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
4129 m = instr_meter_idx_imm(p, ip);
4132 length = instr_meter_length_nbo(t, ip);
4133 color_in = instr_meter_color_in_hbo(t, ip);
4136 &m->profile->profile,
4141 color_out &= m->color_mask;
4143 n_pkts = m->n_pkts[color_out];
4144 n_bytes = m->n_bytes[color_out];
4146 instr_meter_color_out_hbo_set(t, ip, color_out);
4148 m->n_pkts[color_out] = n_pkts + 1;
4149 m->n_bytes[color_out] = n_bytes + length;
4153 __instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4156 uint64_t time, n_pkts, n_bytes;
4160 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
4162 m = instr_meter_idx_imm(p, ip);
4165 length = instr_meter_length_nbo(t, ip);
4166 color_in = (
enum rte_color)ip->meter.color_in_val;
4169 &m->profile->profile,
4174 color_out &= m->color_mask;
4176 n_pkts = m->n_pkts[color_out];
4177 n_bytes = m->n_bytes[color_out];
4179 instr_meter_color_out_hbo_set(t, ip, color_out);
4181 m->n_pkts[color_out] = n_pkts + 1;
4182 m->n_bytes[color_out] = n_bytes + length;
4186 __instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4189 uint64_t time, n_pkts, n_bytes;
4193 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
4195 m = instr_meter_idx_imm(p, ip);
4198 length = instr_meter_length_hbo(t, ip);
4199 color_in = instr_meter_color_in_hbo(t, ip);
4202 &m->profile->profile,
4207 color_out &= m->color_mask;
4209 n_pkts = m->n_pkts[color_out];
4210 n_bytes = m->n_bytes[color_out];
4212 instr_meter_color_out_hbo_set(t, ip, color_out);
4214 m->n_pkts[color_out] = n_pkts + 1;
4215 m->n_bytes[color_out] = n_bytes + length;
4219 __instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4222 uint64_t time, n_pkts, n_bytes;
4226 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
4228 m = instr_meter_idx_imm(p, ip);
4231 length = instr_meter_length_hbo(t, ip);
4232 color_in = (
enum rte_color)ip->meter.color_in_val;
4235 &m->profile->profile,
4240 color_out &= m->color_mask;
4242 n_pkts = m->n_pkts[color_out];
4243 n_bytes = m->n_bytes[color_out];
4245 instr_meter_color_out_hbo_set(t, ip, color_out);
4247 m->n_pkts[color_out] = n_pkts + 1;
4248 m->n_bytes[color_out] = n_bytes + length;
int(* rte_swx_extern_func_t)(void *mailbox)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
__rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
void(* rte_swx_port_out_flush_t)(void *port)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
static uint64_t rte_get_tsc_cycles(void)
void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
__rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
#define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX
#define RTE_SWX_NAME_SIZE
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, int *hit)
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
static void rte_prefetch0(const volatile void *p)
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)