4#ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5#define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
/* Debug trace hook: forwards the format string and arguments straight to
 * printf(). NOTE(review): in this view the macro is unconditional; confirm
 * whether an enclosing debug #ifdef (not visible here) gates it.
 */
27#define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network-to-host byte order conversion (big endian -> CPU order). */
35#define ntoh64(x) rte_be_to_cpu_64(x)
/* 64-bit host-to-network byte order conversion (CPU order -> big endian). */
36#define hton64(x) rte_cpu_to_be_64(x)
49 TAILQ_ENTRY(struct_type) node;
58TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
69TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78TAILQ_HEAD(port_in_tailq, port_in);
80struct port_in_runtime {
89 TAILQ_ENTRY(port_out_type) node;
94TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103TAILQ_HEAD(port_out_tailq, port_out);
105struct port_out_runtime {
114struct extern_type_member_func {
115 TAILQ_ENTRY(extern_type_member_func) node;
121TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
124 TAILQ_ENTRY(extern_type) node;
126 struct struct_type *mailbox_struct_type;
129 struct extern_type_member_func_tailq funcs;
133TAILQ_HEAD(extern_type_tailq, extern_type);
136 TAILQ_ENTRY(extern_obj) node;
138 struct extern_type *type;
144TAILQ_HEAD(extern_obj_tailq, extern_obj);
/* Maximum number of member functions per extern type (default 8); may be
 * overridden at build time by defining it before including this header.
 */
146#ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
147#define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
150struct extern_obj_runtime {
160 TAILQ_ENTRY(extern_func) node;
162 struct struct_type *mailbox_struct_type;
168TAILQ_HEAD(extern_func_tailq, extern_func);
170struct extern_func_runtime {
179 TAILQ_ENTRY(header) node;
181 struct struct_type *st;
186TAILQ_HEAD(header_tailq, header);
188struct header_runtime {
193struct header_out_runtime {
219enum instruction_type {
260 INSTR_HDR_INVALIDATE,
310 INSTR_ALU_CKADD_FIELD,
311 INSTR_ALU_CKADD_STRUCT20,
312 INSTR_ALU_CKADD_STRUCT,
318 INSTR_ALU_CKSUB_FIELD,
376 INSTR_REGPREFETCH_RH,
377 INSTR_REGPREFETCH_RM,
378 INSTR_REGPREFETCH_RI,
455 INSTR_LEARNER_FORGET,
491 INSTR_JMP_ACTION_HIT,
496 INSTR_JMP_ACTION_MISS,
549struct instr_operand {
570 uint8_t header_id[8];
571 uint8_t struct_id[8];
576struct instr_hdr_validity {
589struct instr_extern_obj {
594struct instr_extern_func {
598struct instr_dst_src {
599 struct instr_operand dst;
601 struct instr_operand src;
606struct instr_regarray {
611 struct instr_operand idx;
616 struct instr_operand dstsrc;
626 struct instr_operand idx;
630 struct instr_operand length;
633 struct instr_operand color_in;
634 uint32_t color_in_val;
637 struct instr_operand color_out;
642 uint8_t header_id[8];
643 uint8_t struct_id[8];
654 struct instruction *ip;
657 struct instr_operand a;
663 struct instr_operand b;
669 enum instruction_type type;
672 struct instr_hdr_validity valid;
673 struct instr_dst_src mov;
674 struct instr_regarray regarray;
675 struct instr_meter meter;
676 struct instr_dma dma;
677 struct instr_dst_src alu;
678 struct instr_table table;
679 struct instr_learn learn;
680 struct instr_extern_obj ext_obj;
681 struct instr_extern_func ext_func;
682 struct instr_jmp jmp;
686struct instruction_data {
693typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
699(*action_func_t)(
struct rte_swx_pipeline *p);
702 TAILQ_ENTRY(action) node;
704 struct struct_type *st;
705 int *args_endianness;
706 struct instruction *instructions;
707 struct instruction_data *instruction_data;
708 uint32_t n_instructions;
712TAILQ_HEAD(action_tailq, action);
718 TAILQ_ENTRY(table_type) node;
724TAILQ_HEAD(table_type_tailq, table_type);
732 TAILQ_ENTRY(table) node;
735 struct table_type *type;
738 struct match_field *fields;
740 struct header *header;
743 struct action **actions;
744 struct action *default_action;
745 uint8_t *default_action_data;
747 int default_action_is_const;
748 uint32_t action_data_size_max;
749 int *action_is_for_table_entries;
750 int *action_is_for_default_entry;
756TAILQ_HEAD(table_tailq, table);
758struct table_runtime {
764struct table_statistics {
765 uint64_t n_pkts_hit[2];
766 uint64_t *n_pkts_action;
773 TAILQ_ENTRY(selector) node;
776 struct field *group_id_field;
777 struct field **selector_fields;
778 uint32_t n_selector_fields;
779 struct header *selector_header;
780 struct field *member_id_field;
782 uint32_t n_groups_max;
783 uint32_t n_members_per_group_max;
788TAILQ_HEAD(selector_tailq, selector);
790struct selector_runtime {
792 uint8_t **group_id_buffer;
793 uint8_t **selector_buffer;
794 uint8_t **member_id_buffer;
797struct selector_statistics {
805 TAILQ_ENTRY(learner) node;
809 struct field **fields;
811 struct header *header;
814 struct action **actions;
815 struct action *default_action;
816 uint8_t *default_action_data;
818 int default_action_is_const;
819 uint32_t action_data_size_max;
820 int *action_is_for_table_entries;
821 int *action_is_for_default_entry;
828TAILQ_HEAD(learner_tailq, learner);
830struct learner_runtime {
835struct learner_statistics {
836 uint64_t n_pkts_hit[2];
837 uint64_t n_pkts_learn[2];
838 uint64_t n_pkts_forget;
839 uint64_t *n_pkts_action;
846 TAILQ_ENTRY(regarray) node;
853TAILQ_HEAD(regarray_tailq, regarray);
855struct regarray_runtime {
863struct meter_profile {
864 TAILQ_ENTRY(meter_profile) node;
867 struct rte_meter_trtcm_profile profile;
871TAILQ_HEAD(meter_profile_tailq, meter_profile);
874 TAILQ_ENTRY(metarray) node;
880TAILQ_HEAD(metarray_tailq, metarray);
884 struct meter_profile *profile;
892struct metarray_runtime {
893 struct meter *metarray;
909 struct header_runtime *headers;
910 struct header_out_runtime *headers_out;
911 uint8_t *header_storage;
912 uint8_t *header_out_storage;
913 uint64_t valid_headers;
914 uint32_t n_headers_out;
920 struct table_runtime *tables;
921 struct selector_runtime *selectors;
922 struct learner_runtime *learners;
930 struct extern_obj_runtime *extern_objs;
931 struct extern_func_runtime *extern_funcs;
934 struct instruction *ip;
935 struct instruction *ret;
/* 64-bit mask helpers. GET evaluates to the bit left in place (non-zero when
 * bit *pos* is set, NOT normalized to 0/1); SET and CLR evaluate to a new
 * mask value with bit *pos* set/cleared — the input mask is not modified.
 */
938#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
939#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
940#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when header *header_id* is flagged valid in the thread's
 * valid_headers bit mask (one bit per header).
 */
942#define HEADER_VALID(thread, header_id) \
943 MASK64_BIT_GET((thread)->valid_headers, header_id)
945static inline uint64_t
946instr_operand_hbo(
struct thread *t,
const struct instr_operand *x)
948 uint8_t *x_struct = t->structs[x->struct_id];
949 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
950 uint64_t x64 = *x64_ptr;
951 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
953 return x64 & x64_mask;
956#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
958static inline uint64_t
959instr_operand_nbo(
struct thread *t,
const struct instr_operand *x)
961 uint8_t *x_struct = t->structs[x->struct_id];
962 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
963 uint64_t x64 = *x64_ptr;
965 return ntoh64(x64) >> (64 - x->n_bits);
/* Alias: the network-byte-order operand read reuses the host-byte-order
 * reader, i.e. no swap is performed. NOTE(review): presumably the #else
 * branch of the RTE_BYTE_ORDER check above — enclosing #else not visible
 * in this view; confirm.
 */
970#define instr_operand_nbo instr_operand_hbo
974#define ALU(thread, ip, operator) \
976 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
977 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
978 uint64_t dst64 = *dst64_ptr; \
979 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
980 uint64_t dst = dst64 & dst64_mask; \
982 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
983 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
984 uint64_t src64 = *src64_ptr; \
985 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
986 uint64_t src = src64 & src64_mask; \
988 uint64_t result = dst operator src; \
990 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
993#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
995#define ALU_MH(thread, ip, operator) \
997 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
998 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
999 uint64_t dst64 = *dst64_ptr; \
1000 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1001 uint64_t dst = dst64 & dst64_mask; \
1003 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1004 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1005 uint64_t src64 = *src64_ptr; \
1006 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1008 uint64_t result = dst operator src; \
1010 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1013#define ALU_HM(thread, ip, operator) \
1015 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1016 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1017 uint64_t dst64 = *dst64_ptr; \
1018 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1019 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1021 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1022 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1023 uint64_t src64 = *src64_ptr; \
1024 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1025 uint64_t src = src64 & src64_mask; \
1027 uint64_t result = dst operator src; \
1028 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1030 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1033#define ALU_HM_FAST(thread, ip, operator) \
1035 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1036 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1037 uint64_t dst64 = *dst64_ptr; \
1038 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1039 uint64_t dst = dst64 & dst64_mask; \
1041 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1042 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1043 uint64_t src64 = *src64_ptr; \
1044 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
1045 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
1047 uint64_t result = dst operator src; \
1049 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1052#define ALU_HH(thread, ip, operator) \
1054 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1055 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1056 uint64_t dst64 = *dst64_ptr; \
1057 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1058 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1060 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1061 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1062 uint64_t src64 = *src64_ptr; \
1063 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
1065 uint64_t result = dst operator src; \
1066 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1068 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1071#define ALU_HH_FAST(thread, ip, operator) \
1073 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1074 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1075 uint64_t dst64 = *dst64_ptr; \
1076 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1077 uint64_t dst = dst64 & dst64_mask; \
1079 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
1080 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
1081 uint64_t src64 = *src64_ptr; \
1082 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
1084 uint64_t result = dst operator src; \
1086 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
/* Fallback aliases: the "fast" header-operand ALU variants degenerate to the
 * generic ALU macro. NOTE(review): presumably the non-little-endian branch of
 * the RTE_BYTE_ORDER conditional — the enclosing #else is not visible here.
 */
1093#define ALU_HM_FAST ALU
1095#define ALU_HH_FAST ALU
1099#define ALU_I(thread, ip, operator) \
1101 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1102 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1103 uint64_t dst64 = *dst64_ptr; \
1104 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1105 uint64_t dst = dst64 & dst64_mask; \
1107 uint64_t src = (ip)->alu.src_val; \
1109 uint64_t result = dst operator src; \
1111 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
1116#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1118#define ALU_HI(thread, ip, operator) \
1120 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
1121 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
1122 uint64_t dst64 = *dst64_ptr; \
1123 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
1124 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
1126 uint64_t src = (ip)->alu.src_val; \
1128 uint64_t result = dst operator src; \
1129 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
1131 *dst64_ptr = (dst64 & ~dst64_mask) | result; \
1140#define MOV(thread, ip) \
1142 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1143 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1144 uint64_t dst64 = *dst64_ptr; \
1145 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1147 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1148 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1149 uint64_t src64 = *src64_ptr; \
1150 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1151 uint64_t src = src64 & src64_mask; \
1153 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1156#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1158#define MOV_MH(thread, ip) \
1160 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1161 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1162 uint64_t dst64 = *dst64_ptr; \
1163 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1165 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1166 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1167 uint64_t src64 = *src64_ptr; \
1168 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
1170 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1173#define MOV_HM(thread, ip) \
1175 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1176 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1177 uint64_t dst64 = *dst64_ptr; \
1178 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1180 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1181 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1182 uint64_t src64 = *src64_ptr; \
1183 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
1184 uint64_t src = src64 & src64_mask; \
1186 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
1187 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1190#define MOV_HH(thread, ip) \
1192 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1193 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1194 uint64_t dst64 = *dst64_ptr; \
1195 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1197 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
1198 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
1199 uint64_t src64 = *src64_ptr; \
1201 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
1202 src = src >> (64 - (ip)->mov.dst.n_bits); \
1203 *dst64_ptr = (dst64 & ~dst64_mask) | src; \
1214#define MOV_I(thread, ip) \
1216 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
1217 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
1218 uint64_t dst64 = *dst64_ptr; \
1219 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
1221 uint64_t src = (ip)->mov.src_val; \
1223 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
1226#define JMP_CMP(thread, ip, operator) \
1228 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1229 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1230 uint64_t a64 = *a64_ptr; \
1231 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1232 uint64_t a = a64 & a64_mask; \
1234 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1235 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1236 uint64_t b64 = *b64_ptr; \
1237 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1238 uint64_t b = b64 & b64_mask; \
1240 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1243#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1245#define JMP_CMP_MH(thread, ip, operator) \
1247 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1248 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1249 uint64_t a64 = *a64_ptr; \
1250 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1251 uint64_t a = a64 & a64_mask; \
1253 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1254 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1255 uint64_t b64 = *b64_ptr; \
1256 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1258 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1261#define JMP_CMP_HM(thread, ip, operator) \
1263 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1264 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1265 uint64_t a64 = *a64_ptr; \
1266 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1268 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1269 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1270 uint64_t b64 = *b64_ptr; \
1271 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1272 uint64_t b = b64 & b64_mask; \
1274 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1277#define JMP_CMP_HH(thread, ip, operator) \
1279 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1280 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1281 uint64_t a64 = *a64_ptr; \
1282 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1284 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1285 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1286 uint64_t b64 = *b64_ptr; \
1287 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
1289 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1292#define JMP_CMP_HH_FAST(thread, ip, operator) \
1294 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1295 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1296 uint64_t a64 = *a64_ptr; \
1297 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1299 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1300 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1301 uint64_t b64 = *b64_ptr; \
1302 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1304 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Fallback aliases: all header/metadata compare-and-jump variants reduce to
 * the plain JMP_CMP macro (no byte swapping needed). NOTE(review): presumably
 * the non-little-endian branch of the RTE_BYTE_ORDER conditional — the
 * enclosing #else is not visible in this view.
 */
1309#define JMP_CMP_MH JMP_CMP
1310#define JMP_CMP_HM JMP_CMP
1311#define JMP_CMP_HH JMP_CMP
1312#define JMP_CMP_HH_FAST JMP_CMP
1316#define JMP_CMP_I(thread, ip, operator) \
1318 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1319 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1320 uint64_t a64 = *a64_ptr; \
1321 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1322 uint64_t a = a64 & a64_mask; \
1324 uint64_t b = (ip)->jmp.b_val; \
1326 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Metadata operands are kept in host byte order, so metadata-vs-immediate
 * compare reuses JMP_CMP_I unchanged on either endianness.
 */
1329#define JMP_CMP_MI JMP_CMP_I
1331#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1333#define JMP_CMP_HI(thread, ip, operator) \
1335 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1336 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1337 uint64_t a64 = *a64_ptr; \
1338 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
1340 uint64_t b = (ip)->jmp.b_val; \
1342 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Fallback alias: header-vs-immediate compare reduces to JMP_CMP_I.
 * NOTE(review): presumably the non-little-endian branch of the byte-order
 * conditional opened at the #if above — the #else itself is not visible here.
 */
1347#define JMP_CMP_HI JMP_CMP_I
1351#define METADATA_READ(thread, offset, n_bits) \
1353 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1354 uint64_t m64 = *m64_ptr; \
1355 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1359#define METADATA_WRITE(thread, offset, n_bits, value) \
1361 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
1362 uint64_t m64 = *m64_ptr; \
1363 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
1365 uint64_t m_new = value; \
1367 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
/* Number of packet-processing threads per pipeline (default 16); build-time
 * override allowed. Must be a power of two: the thread scheduler advances
 * thread_id with "& (RTE_SWX_PIPELINE_THREADS_MAX - 1)".
 */
1370#ifndef RTE_SWX_PIPELINE_THREADS_MAX
1371#define RTE_SWX_PIPELINE_THREADS_MAX 16
/* Capacity of the per-pipeline instruction table (default 256); build-time
 * override allowed. NOTE(review): presumably bounds the instruction_table
 * array of instr_exec_t handlers declared in struct rte_swx_pipeline —
 * confirm against the allocation site.
 */
1374#ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX
1375#define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 256
1378struct rte_swx_pipeline {
1379 struct struct_type_tailq struct_types;
1380 struct port_in_type_tailq port_in_types;
1381 struct port_in_tailq ports_in;
1382 struct port_out_type_tailq port_out_types;
1383 struct port_out_tailq ports_out;
1384 struct extern_type_tailq extern_types;
1385 struct extern_obj_tailq extern_objs;
1386 struct extern_func_tailq extern_funcs;
1387 struct header_tailq headers;
1388 struct struct_type *metadata_st;
1389 uint32_t metadata_struct_id;
1390 struct action_tailq actions;
1391 struct table_type_tailq table_types;
1392 struct table_tailq tables;
1393 struct selector_tailq selectors;
1394 struct learner_tailq learners;
1395 struct regarray_tailq regarrays;
1396 struct meter_profile_tailq meter_profiles;
1397 struct metarray_tailq metarrays;
1399 struct port_in_runtime *in;
1400 struct port_out_runtime *out;
1401 struct instruction **action_instructions;
1402 action_func_t *action_funcs;
1404 struct table_statistics *table_stats;
1405 struct selector_statistics *selector_stats;
1406 struct learner_statistics *learner_stats;
1407 struct regarray_runtime *regarray_runtime;
1408 struct metarray_runtime *metarray_runtime;
1409 struct instruction *instructions;
1410 struct instruction_data *instruction_data;
1411 instr_exec_t *instruction_table;
1412 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1416 uint32_t n_ports_in;
1417 uint32_t n_ports_out;
1418 uint32_t n_extern_objs;
1419 uint32_t n_extern_funcs;
1422 uint32_t n_selectors;
1423 uint32_t n_learners;
1424 uint32_t n_regarrays;
1425 uint32_t n_metarrays;
1429 uint32_t n_instructions;
1438pipeline_port_inc(
struct rte_swx_pipeline *p)
1440 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1444thread_ip_reset(
struct rte_swx_pipeline *p,
struct thread *t)
1446 t->ip = p->instructions;
1450thread_ip_set(
struct thread *t,
struct instruction *ip)
1456thread_ip_action_call(
struct rte_swx_pipeline *p,
1461 t->ip = p->action_instructions[action_id];
1465thread_ip_inc(
struct rte_swx_pipeline *p);
1468thread_ip_inc(
struct rte_swx_pipeline *p)
1470 struct thread *t = &p->threads[p->thread_id];
1476thread_ip_inc_cond(
struct thread *t,
int cond)
1482thread_yield(
struct rte_swx_pipeline *p)
1484 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1488thread_yield_cond(
struct rte_swx_pipeline *p,
int cond)
1490 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1497__instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1499 struct port_in_runtime *port = &p->in[p->port_id];
1504 pkt_received = port->pkt_rx(port->obj,
pkt);
1505 t->ptr = &
pkt->pkt[
pkt->offset];
1508 TRACE(
"[Thread %2u] rx %s from port %u\n",
1510 pkt_received ?
"1 pkt" :
"0 pkts",
1514 t->valid_headers = 0;
1515 t->n_headers_out = 0;
1518 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1521 t->table_state = p->table_state;
1524 pipeline_port_inc(p);
1526 return pkt_received;
1530instr_rx_exec(
struct rte_swx_pipeline *p)
1532 struct thread *t = &p->threads[p->thread_id];
1533 struct instruction *ip = t->ip;
1537 pkt_received = __instr_rx_exec(p, t, ip);
1540 thread_ip_inc_cond(t, pkt_received);
1548emit_handler(
struct thread *t)
1550 struct header_out_runtime *h0 = &t->headers_out[0];
1551 struct header_out_runtime *h1 = &t->headers_out[1];
1552 uint32_t offset = 0, i;
1555 if ((t->n_headers_out == 1) &&
1556 (h0->ptr + h0->n_bytes == t->ptr)) {
1557 TRACE(
"Emit handler: no header change or header decap.\n");
1559 t->pkt.offset -= h0->n_bytes;
1560 t->pkt.length += h0->n_bytes;
1566 if ((t->n_headers_out == 2) &&
1567 (h1->ptr + h1->n_bytes == t->ptr) &&
1568 (h0->ptr == h0->ptr0)) {
1571 TRACE(
"Emit handler: header encapsulation.\n");
1573 offset = h0->n_bytes + h1->n_bytes;
1574 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1575 t->pkt.offset -= offset;
1576 t->pkt.length += offset;
1582 TRACE(
"Emit handler: complex case.\n");
1584 for (i = 0; i < t->n_headers_out; i++) {
1585 struct header_out_runtime *h = &t->headers_out[i];
1587 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1588 offset += h->n_bytes;
1592 memcpy(t->ptr - offset, t->header_out_storage, offset);
1593 t->pkt.offset -= offset;
1594 t->pkt.length += offset;
1599__instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1601 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1602 struct port_out_runtime *port = &p->out[port_id];
1605 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1613 port->pkt_tx(port->obj,
pkt);
1617__instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1619 uint64_t port_id = ip->io.io.val;
1620 struct port_out_runtime *port = &p->out[port_id];
1623 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1631 port->pkt_tx(port->obj,
pkt);
1638__instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
1640 const struct instruction *ip,
1643 uint64_t valid_headers = t->valid_headers;
1644 uint8_t *ptr = t->ptr;
1645 uint32_t
offset = t->pkt.offset;
1646 uint32_t
length = t->pkt.length;
1649 for (i = 0; i < n_extract; i++) {
1650 uint32_t header_id = ip->io.hdr.header_id[i];
1651 uint32_t struct_id = ip->io.hdr.struct_id[i];
1652 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1654 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
1660 t->structs[struct_id] = ptr;
1661 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1670 t->valid_headers = valid_headers;
1679__instr_hdr_extract_exec(
struct rte_swx_pipeline *p,
1681 const struct instruction *ip)
1683 __instr_hdr_extract_many_exec(p, t, ip, 1);
1687__instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
1689 const struct instruction *ip)
1691 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1693 __instr_hdr_extract_many_exec(p, t, ip, 2);
1697__instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
1699 const struct instruction *ip)
1701 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1703 __instr_hdr_extract_many_exec(p, t, ip, 3);
1707__instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
1709 const struct instruction *ip)
1711 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1713 __instr_hdr_extract_many_exec(p, t, ip, 4);
1717__instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
1719 const struct instruction *ip)
1721 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1723 __instr_hdr_extract_many_exec(p, t, ip, 5);
1727__instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
1729 const struct instruction *ip)
1731 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1733 __instr_hdr_extract_many_exec(p, t, ip, 6);
1737__instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
1739 const struct instruction *ip)
1741 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1743 __instr_hdr_extract_many_exec(p, t, ip, 7);
1747__instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
1749 const struct instruction *ip)
1751 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1753 __instr_hdr_extract_many_exec(p, t, ip, 8);
1757__instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p
__rte_unused,
1759 const struct instruction *ip)
1761 uint64_t valid_headers = t->valid_headers;
1762 uint8_t *ptr = t->ptr;
1763 uint32_t
offset = t->pkt.offset;
1764 uint32_t
length = t->pkt.length;
1766 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1767 uint32_t header_id = ip->io.hdr.header_id[0];
1768 uint32_t struct_id = ip->io.hdr.struct_id[0];
1769 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
1771 struct header_runtime *h = &t->headers[header_id];
1773 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
1779 n_bytes += n_bytes_last;
1782 t->structs[struct_id] = ptr;
1783 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1784 h->n_bytes = n_bytes;
1787 t->pkt.offset = offset + n_bytes;
1788 t->pkt.length = length - n_bytes;
1789 t->ptr = ptr + n_bytes;
1793__instr_hdr_lookahead_exec(
struct rte_swx_pipeline *p
__rte_unused,
1795 const struct instruction *ip)
1797 uint64_t valid_headers = t->valid_headers;
1798 uint8_t *ptr = t->ptr;
1800 uint32_t header_id = ip->io.hdr.header_id[0];
1801 uint32_t struct_id = ip->io.hdr.struct_id[0];
1803 TRACE(
"[Thread %2u]: lookahead header %u\n",
1808 t->structs[struct_id] = ptr;
1809 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1816__instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
1818 const struct instruction *ip,
1821 uint64_t valid_headers = t->valid_headers;
1822 uint32_t n_headers_out = t->n_headers_out;
1823 struct header_out_runtime *ho = NULL;
1824 uint8_t *ho_ptr = NULL;
1825 uint32_t ho_nbytes = 0, i;
1827 for (i = 0; i < n_emit; i++) {
1828 uint32_t header_id = ip->io.hdr.header_id[i];
1829 uint32_t struct_id = ip->io.hdr.struct_id[i];
1831 struct header_runtime *hi = &t->headers[header_id];
1832 uint8_t *hi_ptr0 = hi->ptr0;
1833 uint32_t n_bytes = hi->n_bytes;
1835 uint8_t *hi_ptr = t->structs[struct_id];
1837 if (!MASK64_BIT_GET(valid_headers, header_id)) {
1838 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
1845 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
1851 if (!n_headers_out) {
1852 ho = &t->headers_out[0];
1858 ho_nbytes = n_bytes;
1864 ho = &t->headers_out[n_headers_out - 1];
1867 ho_nbytes = ho->n_bytes;
1871 if (ho_ptr + ho_nbytes == hi_ptr) {
1872 ho_nbytes += n_bytes;
1874 ho->n_bytes = ho_nbytes;
1881 ho_nbytes = n_bytes;
1888 ho->n_bytes = ho_nbytes;
1889 t->n_headers_out = n_headers_out;
1893__instr_hdr_emit_exec(
struct rte_swx_pipeline *p,
1895 const struct instruction *ip)
1897 __instr_hdr_emit_many_exec(p, t, ip, 1);
1901__instr_hdr_emit_tx_exec(
struct rte_swx_pipeline *p,
1903 const struct instruction *ip)
1905 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1907 __instr_hdr_emit_many_exec(p, t, ip, 1);
1908 __instr_tx_exec(p, t, ip);
1912__instr_hdr_emit2_tx_exec(
struct rte_swx_pipeline *p,
1914 const struct instruction *ip)
1916 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1918 __instr_hdr_emit_many_exec(p, t, ip, 2);
1919 __instr_tx_exec(p, t, ip);
1923__instr_hdr_emit3_tx_exec(
struct rte_swx_pipeline *p,
1925 const struct instruction *ip)
1927 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1929 __instr_hdr_emit_many_exec(p, t, ip, 3);
1930 __instr_tx_exec(p, t, ip);
1934__instr_hdr_emit4_tx_exec(
struct rte_swx_pipeline *p,
1936 const struct instruction *ip)
1938 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1940 __instr_hdr_emit_many_exec(p, t, ip, 4);
1941 __instr_tx_exec(p, t, ip);
1945__instr_hdr_emit5_tx_exec(
struct rte_swx_pipeline *p,
1947 const struct instruction *ip)
1949 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1951 __instr_hdr_emit_many_exec(p, t, ip, 5);
1952 __instr_tx_exec(p, t, ip);
1956__instr_hdr_emit6_tx_exec(
struct rte_swx_pipeline *p,
1958 const struct instruction *ip)
1960 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1962 __instr_hdr_emit_many_exec(p, t, ip, 6);
1963 __instr_tx_exec(p, t, ip);
1967__instr_hdr_emit7_tx_exec(
struct rte_swx_pipeline *p,
1969 const struct instruction *ip)
1971 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
1973 __instr_hdr_emit_many_exec(p, t, ip, 7);
1974 __instr_tx_exec(p, t, ip);
1978__instr_hdr_emit8_tx_exec(
struct rte_swx_pipeline *p,
1980 const struct instruction *ip)
1982 TRACE(
"[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
1984 __instr_hdr_emit_many_exec(p, t, ip, 8);
1985 __instr_tx_exec(p, t, ip);
1992__instr_hdr_validate_exec(
struct rte_swx_pipeline *p
__rte_unused,
1994 const struct instruction *ip)
1996 uint32_t header_id = ip->valid.header_id;
1998 TRACE(
"[Thread %2u] validate header %u\n", p->thread_id, header_id);
2001 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);
2008__instr_hdr_invalidate_exec(
struct rte_swx_pipeline *p
__rte_unused,
2010 const struct instruction *ip)
2012 uint32_t header_id = ip->valid.header_id;
2014 TRACE(
"[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2017 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
2024__instr_learn_exec(
struct rte_swx_pipeline *p,
2026 const struct instruction *ip)
2028 uint64_t action_id = ip->learn.action_id;
2029 uint32_t mf_offset = ip->learn.mf_offset;
2030 uint32_t learner_id = t->learner_id;
2032 p->n_selectors + learner_id];
2033 struct learner_runtime *l = &t->learners[learner_id];
2034 struct learner_statistics *stats = &p->learner_stats[learner_id];
2042 &t->metadata[mf_offset]);
2044 TRACE(
"[Thread %2u] learner %u learn %s\n",
2047 status ?
"ok" :
"error");
2049 stats->n_pkts_learn[status] += 1;
2056__instr_forget_exec(
struct rte_swx_pipeline *p,
2060 uint32_t learner_id = t->learner_id;
2062 p->n_selectors + learner_id];
2063 struct learner_runtime *l = &t->learners[learner_id];
2064 struct learner_statistics *stats = &p->learner_stats[learner_id];
2069 TRACE(
"[Thread %2u] learner %u forget\n",
2073 stats->n_pkts_forget += 1;
2079static inline uint32_t
2080__instr_extern_obj_exec(
struct rte_swx_pipeline *p
__rte_unused,
2082 const struct instruction *ip)
2084 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2085 uint32_t func_id = ip->ext_obj.func_id;
2086 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2090 TRACE(
"[Thread %2u] extern obj %u member func %u\n",
2095 done = func(obj->obj, obj->mailbox);
2100static inline uint32_t
2101__instr_extern_func_exec(
struct rte_swx_pipeline *p
__rte_unused,
2103 const struct instruction *ip)
2105 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2106 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2110 TRACE(
"[Thread %2u] extern func %u\n",
2114 done = func(ext_func->mailbox);
2123__instr_mov_exec(
struct rte_swx_pipeline *p
__rte_unused,
2125 const struct instruction *ip)
2127 TRACE(
"[Thread %2u] mov\n", p->thread_id);
2133__instr_mov_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2135 const struct instruction *ip)
2137 TRACE(
"[Thread %2u] mov (mh)\n", p->thread_id);
2143__instr_mov_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2145 const struct instruction *ip)
2147 TRACE(
"[Thread %2u] mov (hm)\n", p->thread_id);
2153__instr_mov_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2155 const struct instruction *ip)
2157 TRACE(
"[Thread %2u] mov (hh)\n", p->thread_id);
2163__instr_mov_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2165 const struct instruction *ip)
2167 TRACE(
"[Thread %2u] mov m.f %" PRIx64
"\n", p->thread_id, ip->mov.src_val);
2176__instr_dma_ht_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
2178 const struct instruction *ip,
2181 uint8_t *action_data = t->structs[0];
2182 uint64_t valid_headers = t->valid_headers;
2185 for (i = 0; i < n_dma; i++) {
2186 uint32_t header_id = ip->dma.dst.header_id[i];
2187 uint32_t struct_id = ip->dma.dst.struct_id[i];
2188 uint32_t offset = ip->dma.src.offset[i];
2189 uint32_t n_bytes = ip->dma.n_bytes[i];
2191 struct header_runtime *h = &t->headers[header_id];
2192 uint8_t *h_ptr0 = h->ptr0;
2193 uint8_t *h_ptr = t->structs[struct_id];
2195 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2197 void *src = &action_data[offset];
2199 TRACE(
"[Thread %2u] dma h.s t.f\n", p->thread_id);
2202 memcpy(dst, src, n_bytes);
2203 t->structs[struct_id] = dst;
2204 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2207 t->valid_headers = valid_headers;
/* dma: single header/action-data transfer (n_dma = 1). */
static inline void
__instr_dma_ht_exec(struct rte_swx_pipeline *p,
		    struct thread *t,
		    const struct instruction *ip)
{
	__instr_dma_ht_many_exec(p, t, ip, 1);
}
2217__instr_dma_ht2_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2219 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2221 __instr_dma_ht_many_exec(p, t, ip, 2);
2225__instr_dma_ht3_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2227 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2229 __instr_dma_ht_many_exec(p, t, ip, 3);
2233__instr_dma_ht4_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2235 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2237 __instr_dma_ht_many_exec(p, t, ip, 4);
2241__instr_dma_ht5_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2243 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2245 __instr_dma_ht_many_exec(p, t, ip, 5);
2249__instr_dma_ht6_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2251 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2253 __instr_dma_ht_many_exec(p, t, ip, 6);
2257__instr_dma_ht7_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2259 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2261 __instr_dma_ht_many_exec(p, t, ip, 7);
2265__instr_dma_ht8_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2267 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2269 __instr_dma_ht_many_exec(p, t, ip, 8);
2276__instr_alu_add_exec(
struct rte_swx_pipeline *p
__rte_unused,
2278 const struct instruction *ip)
2280 TRACE(
"[Thread %2u] add\n", p->thread_id);
2286__instr_alu_add_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2288 const struct instruction *ip)
2290 TRACE(
"[Thread %2u] add (mh)\n", p->thread_id);
2296__instr_alu_add_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2298 const struct instruction *ip)
2300 TRACE(
"[Thread %2u] add (hm)\n", p->thread_id);
2306__instr_alu_add_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2308 const struct instruction *ip)
2310 TRACE(
"[Thread %2u] add (hh)\n", p->thread_id);
2316__instr_alu_add_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2318 const struct instruction *ip)
2320 TRACE(
"[Thread %2u] add (mi)\n", p->thread_id);
2326__instr_alu_add_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2328 const struct instruction *ip)
2330 TRACE(
"[Thread %2u] add (hi)\n", p->thread_id);
2336__instr_alu_sub_exec(
struct rte_swx_pipeline *p
__rte_unused,
2338 const struct instruction *ip)
2340 TRACE(
"[Thread %2u] sub\n", p->thread_id);
2346__instr_alu_sub_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2348 const struct instruction *ip)
2350 TRACE(
"[Thread %2u] sub (mh)\n", p->thread_id);
2356__instr_alu_sub_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2358 const struct instruction *ip)
2360 TRACE(
"[Thread %2u] sub (hm)\n", p->thread_id);
2366__instr_alu_sub_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2368 const struct instruction *ip)
2370 TRACE(
"[Thread %2u] sub (hh)\n", p->thread_id);
2376__instr_alu_sub_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2378 const struct instruction *ip)
2380 TRACE(
"[Thread %2u] sub (mi)\n", p->thread_id);
2386__instr_alu_sub_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2388 const struct instruction *ip)
2390 TRACE(
"[Thread %2u] sub (hi)\n", p->thread_id);
2396__instr_alu_shl_exec(
struct rte_swx_pipeline *p
__rte_unused,
2398 const struct instruction *ip)
2400 TRACE(
"[Thread %2u] shl\n", p->thread_id);
2406__instr_alu_shl_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2408 const struct instruction *ip)
2410 TRACE(
"[Thread %2u] shl (mh)\n", p->thread_id);
2416__instr_alu_shl_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2418 const struct instruction *ip)
2420 TRACE(
"[Thread %2u] shl (hm)\n", p->thread_id);
2426__instr_alu_shl_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2428 const struct instruction *ip)
2430 TRACE(
"[Thread %2u] shl (hh)\n", p->thread_id);
2436__instr_alu_shl_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2438 const struct instruction *ip)
2440 TRACE(
"[Thread %2u] shl (mi)\n", p->thread_id);
2446__instr_alu_shl_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2448 const struct instruction *ip)
2450 TRACE(
"[Thread %2u] shl (hi)\n", p->thread_id);
2456__instr_alu_shr_exec(
struct rte_swx_pipeline *p
__rte_unused,
2458 const struct instruction *ip)
2460 TRACE(
"[Thread %2u] shr\n", p->thread_id);
2466__instr_alu_shr_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2468 const struct instruction *ip)
2470 TRACE(
"[Thread %2u] shr (mh)\n", p->thread_id);
2476__instr_alu_shr_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2478 const struct instruction *ip)
2480 TRACE(
"[Thread %2u] shr (hm)\n", p->thread_id);
2486__instr_alu_shr_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2488 const struct instruction *ip)
2490 TRACE(
"[Thread %2u] shr (hh)\n", p->thread_id);
2496__instr_alu_shr_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2498 const struct instruction *ip)
2500 TRACE(
"[Thread %2u] shr (mi)\n", p->thread_id);
2507__instr_alu_shr_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2509 const struct instruction *ip)
2511 TRACE(
"[Thread %2u] shr (hi)\n", p->thread_id);
2517__instr_alu_and_exec(
struct rte_swx_pipeline *p
__rte_unused,
2519 const struct instruction *ip)
2521 TRACE(
"[Thread %2u] and\n", p->thread_id);
2527__instr_alu_and_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2529 const struct instruction *ip)
2531 TRACE(
"[Thread %2u] and (mh)\n", p->thread_id);
2537__instr_alu_and_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2539 const struct instruction *ip)
2541 TRACE(
"[Thread %2u] and (hm)\n", p->thread_id);
2543 ALU_HM_FAST(t, ip, &);
2547__instr_alu_and_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2549 const struct instruction *ip)
2551 TRACE(
"[Thread %2u] and (hh)\n", p->thread_id);
2553 ALU_HH_FAST(t, ip, &);
2557__instr_alu_and_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2559 const struct instruction *ip)
2561 TRACE(
"[Thread %2u] and (i)\n", p->thread_id);
2567__instr_alu_or_exec(
struct rte_swx_pipeline *p
__rte_unused,
2569 const struct instruction *ip)
2571 TRACE(
"[Thread %2u] or\n", p->thread_id);
2577__instr_alu_or_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2579 const struct instruction *ip)
2581 TRACE(
"[Thread %2u] or (mh)\n", p->thread_id);
2587__instr_alu_or_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2589 const struct instruction *ip)
2591 TRACE(
"[Thread %2u] or (hm)\n", p->thread_id);
2593 ALU_HM_FAST(t, ip, |);
2597__instr_alu_or_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2599 const struct instruction *ip)
2601 TRACE(
"[Thread %2u] or (hh)\n", p->thread_id);
2603 ALU_HH_FAST(t, ip, |);
2607__instr_alu_or_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2609 const struct instruction *ip)
2611 TRACE(
"[Thread %2u] or (i)\n", p->thread_id);
2617__instr_alu_xor_exec(
struct rte_swx_pipeline *p
__rte_unused,
2619 const struct instruction *ip)
2621 TRACE(
"[Thread %2u] xor\n", p->thread_id);
2627__instr_alu_xor_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2629 const struct instruction *ip)
2631 TRACE(
"[Thread %2u] xor (mh)\n", p->thread_id);
2637__instr_alu_xor_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2639 const struct instruction *ip)
2641 TRACE(
"[Thread %2u] xor (hm)\n", p->thread_id);
2643 ALU_HM_FAST(t, ip, ^);
2647__instr_alu_xor_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2649 const struct instruction *ip)
2651 TRACE(
"[Thread %2u] xor (hh)\n", p->thread_id);
2653 ALU_HH_FAST(t, ip, ^);
2657__instr_alu_xor_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2659 const struct instruction *ip)
2661 TRACE(
"[Thread %2u] xor (i)\n", p->thread_id);
2667__instr_alu_ckadd_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
2669 const struct instruction *ip)
2671 uint8_t *dst_struct, *src_struct;
2672 uint16_t *dst16_ptr, dst;
2673 uint64_t *src64_ptr, src64, src64_mask, src;
2676 TRACE(
"[Thread %2u] ckadd (field)\n", p->thread_id);
2679 dst_struct = t->structs[ip->alu.dst.struct_id];
2680 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2683 src_struct = t->structs[ip->alu.src.struct_id];
2684 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
2686 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2687 src = src64 & src64_mask;
2696 r += (src >> 32) + (src & 0xFFFFFFFF);
2702 r = (r & 0xFFFF) + (r >> 16);
2707 r = (r & 0xFFFF) + (r >> 16);
2714 r = (r & 0xFFFF) + (r >> 16);
2719 *dst16_ptr = (uint16_t)r;
2723__instr_alu_cksub_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
2725 const struct instruction *ip)
2727 uint8_t *dst_struct, *src_struct;
2728 uint16_t *dst16_ptr, dst;
2729 uint64_t *src64_ptr, src64, src64_mask, src;
2732 TRACE(
"[Thread %2u] cksub (field)\n", p->thread_id);
2735 dst_struct = t->structs[ip->alu.dst.struct_id];
2736 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2739 src_struct = t->structs[ip->alu.src.struct_id];
2740 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
2742 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
2743 src = src64 & src64_mask;
2760 r += 0xFFFF00000ULL;
2765 r -= (src >> 32) + (src & 0xFFFFFFFF);
2770 r = (r & 0xFFFF) + (r >> 16);
2775 r = (r & 0xFFFF) + (r >> 16);
2782 r = (r & 0xFFFF) + (r >> 16);
2787 *dst16_ptr = (uint16_t)r;
2791__instr_alu_ckadd_struct20_exec(
struct rte_swx_pipeline *p
__rte_unused,
2793 const struct instruction *ip)
2795 uint8_t *dst_struct, *src_struct;
2796 uint16_t *dst16_ptr;
2797 uint32_t *src32_ptr;
2800 TRACE(
"[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
2803 dst_struct = t->structs[ip->alu.dst.struct_id];
2804 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2806 src_struct = t->structs[ip->alu.src.struct_id];
2807 src32_ptr = (uint32_t *)&src_struct[0];
2813 r0 += r1 + src32_ptr[4];
2818 r0 = (r0 & 0xFFFF) + (r0 >> 16);
2823 r0 = (r0 & 0xFFFF) + (r0 >> 16);
2830 r0 = (r0 & 0xFFFF) + (r0 >> 16);
2833 r0 = r0 ? r0 : 0xFFFF;
2835 *dst16_ptr = (uint16_t)r0;
2839__instr_alu_ckadd_struct_exec(
struct rte_swx_pipeline *p
__rte_unused,
2841 const struct instruction *ip)
2843 uint8_t *dst_struct, *src_struct;
2844 uint16_t *dst16_ptr;
2845 uint32_t *src32_ptr;
2849 TRACE(
"[Thread %2u] ckadd (struct)\n", p->thread_id);
2852 dst_struct = t->structs[ip->alu.dst.struct_id];
2853 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
2855 src_struct = t->structs[ip->alu.src.struct_id];
2856 src32_ptr = (uint32_t *)&src_struct[0];
2862 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
2868 r = (r & 0xFFFF) + (r >> 16);
2873 r = (r & 0xFFFF) + (r >> 16);
2880 r = (r & 0xFFFF) + (r >> 16);
2885 *dst16_ptr = (uint16_t)r;
2891static inline uint64_t *
2892instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
2894 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2898static inline uint64_t
2899instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2901 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2903 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
2904 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
2905 uint64_t idx64 = *idx64_ptr;
2906 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
2907 uint64_t idx = idx64 & idx64_mask & r->size_mask;
2912#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2914static inline uint64_t
2915instr_regarray_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2917 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2919 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
2920 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
2921 uint64_t idx64 = *idx64_ptr;
2922 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
2929#define instr_regarray_idx_nbo instr_regarray_idx_hbo
2933static inline uint64_t
2934instr_regarray_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
2936 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
2938 uint64_t idx = ip->regarray.idx_val & r->size_mask;
2943static inline uint64_t
2944instr_regarray_src_hbo(
struct thread *t,
const struct instruction *ip)
2946 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
2947 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
2948 uint64_t src64 = *src64_ptr;
2949 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2950 uint64_t src = src64 & src64_mask;
2955#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2957static inline uint64_t
2958instr_regarray_src_nbo(
struct thread *t,
const struct instruction *ip)
2960 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
2961 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
2962 uint64_t src64 = *src64_ptr;
2963 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
2970#define instr_regarray_src_nbo instr_regarray_src_hbo
2975instr_regarray_dst_hbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
2977 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
2978 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
2979 uint64_t dst64 = *dst64_ptr;
2980 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2982 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
2986#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2989instr_regarray_dst_nbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
2991 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
2992 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
2993 uint64_t dst64 = *dst64_ptr;
2994 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
2996 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
2997 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3002#define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
3007__instr_regprefetch_rh_exec(
struct rte_swx_pipeline *p,
3009 const struct instruction *ip)
3011 uint64_t *regarray, idx;
3013 TRACE(
"[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3015 regarray = instr_regarray_regarray(p, ip);
3016 idx = instr_regarray_idx_nbo(p, t, ip);
3021__instr_regprefetch_rm_exec(
struct rte_swx_pipeline *p,
3023 const struct instruction *ip)
3025 uint64_t *regarray, idx;
3027 TRACE(
"[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3029 regarray = instr_regarray_regarray(p, ip);
3030 idx = instr_regarray_idx_hbo(p, t, ip);
3035__instr_regprefetch_ri_exec(
struct rte_swx_pipeline *p,
3037 const struct instruction *ip)
3039 uint64_t *regarray, idx;
3041 TRACE(
"[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3043 regarray = instr_regarray_regarray(p, ip);
3044 idx = instr_regarray_idx_imm(p, ip);
3049__instr_regrd_hrh_exec(
struct rte_swx_pipeline *p,
3051 const struct instruction *ip)
3053 uint64_t *regarray, idx;
3055 TRACE(
"[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3057 regarray = instr_regarray_regarray(p, ip);
3058 idx = instr_regarray_idx_nbo(p, t, ip);
3059 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3063__instr_regrd_hrm_exec(
struct rte_swx_pipeline *p,
3065 const struct instruction *ip)
3067 uint64_t *regarray, idx;
3069 TRACE(
"[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3072 regarray = instr_regarray_regarray(p, ip);
3073 idx = instr_regarray_idx_hbo(p, t, ip);
3074 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3078__instr_regrd_mrh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3080 uint64_t *regarray, idx;
3082 TRACE(
"[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3084 regarray = instr_regarray_regarray(p, ip);
3085 idx = instr_regarray_idx_nbo(p, t, ip);
3086 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3090__instr_regrd_mrm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3092 uint64_t *regarray, idx;
3094 TRACE(
"[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3096 regarray = instr_regarray_regarray(p, ip);
3097 idx = instr_regarray_idx_hbo(p, t, ip);
3098 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3102__instr_regrd_hri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3104 uint64_t *regarray, idx;
3106 TRACE(
"[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3108 regarray = instr_regarray_regarray(p, ip);
3109 idx = instr_regarray_idx_imm(p, ip);
3110 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3114__instr_regrd_mri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3116 uint64_t *regarray, idx;
3118 TRACE(
"[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3120 regarray = instr_regarray_regarray(p, ip);
3121 idx = instr_regarray_idx_imm(p, ip);
3122 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3126__instr_regwr_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3128 uint64_t *regarray, idx, src;
3130 TRACE(
"[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3132 regarray = instr_regarray_regarray(p, ip);
3133 idx = instr_regarray_idx_nbo(p, t, ip);
3134 src = instr_regarray_src_nbo(t, ip);
3135 regarray[idx] = src;
3139__instr_regwr_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3141 uint64_t *regarray, idx, src;
3143 TRACE(
"[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3145 regarray = instr_regarray_regarray(p, ip);
3146 idx = instr_regarray_idx_nbo(p, t, ip);
3147 src = instr_regarray_src_hbo(t, ip);
3148 regarray[idx] = src;
3152__instr_regwr_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3154 uint64_t *regarray, idx, src;
3156 TRACE(
"[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3158 regarray = instr_regarray_regarray(p, ip);
3159 idx = instr_regarray_idx_hbo(p, t, ip);
3160 src = instr_regarray_src_nbo(t, ip);
3161 regarray[idx] = src;
3165__instr_regwr_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3167 uint64_t *regarray, idx, src;
3169 TRACE(
"[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3171 regarray = instr_regarray_regarray(p, ip);
3172 idx = instr_regarray_idx_hbo(p, t, ip);
3173 src = instr_regarray_src_hbo(t, ip);
3174 regarray[idx] = src;
3178__instr_regwr_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3180 uint64_t *regarray, idx, src;
3182 TRACE(
"[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3184 regarray = instr_regarray_regarray(p, ip);
3185 idx = instr_regarray_idx_nbo(p, t, ip);
3186 src = ip->regarray.dstsrc_val;
3187 regarray[idx] = src;
3191__instr_regwr_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3193 uint64_t *regarray, idx, src;
3195 TRACE(
"[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3197 regarray = instr_regarray_regarray(p, ip);
3198 idx = instr_regarray_idx_hbo(p, t, ip);
3199 src = ip->regarray.dstsrc_val;
3200 regarray[idx] = src;
3204__instr_regwr_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3206 uint64_t *regarray, idx, src;
3208 TRACE(
"[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3210 regarray = instr_regarray_regarray(p, ip);
3211 idx = instr_regarray_idx_imm(p, ip);
3212 src = instr_regarray_src_nbo(t, ip);
3213 regarray[idx] = src;
3217__instr_regwr_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3219 uint64_t *regarray, idx, src;
3221 TRACE(
"[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3223 regarray = instr_regarray_regarray(p, ip);
3224 idx = instr_regarray_idx_imm(p, ip);
3225 src = instr_regarray_src_hbo(t, ip);
3226 regarray[idx] = src;
3230__instr_regwr_rii_exec(
struct rte_swx_pipeline *p,
3232 const struct instruction *ip)
3234 uint64_t *regarray, idx, src;
3236 TRACE(
"[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3238 regarray = instr_regarray_regarray(p, ip);
3239 idx = instr_regarray_idx_imm(p, ip);
3240 src = ip->regarray.dstsrc_val;
3241 regarray[idx] = src;
3245__instr_regadd_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3247 uint64_t *regarray, idx, src;
3249 TRACE(
"[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3251 regarray = instr_regarray_regarray(p, ip);
3252 idx = instr_regarray_idx_nbo(p, t, ip);
3253 src = instr_regarray_src_nbo(t, ip);
3254 regarray[idx] += src;
3258__instr_regadd_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3260 uint64_t *regarray, idx, src;
3262 TRACE(
"[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3264 regarray = instr_regarray_regarray(p, ip);
3265 idx = instr_regarray_idx_nbo(p, t, ip);
3266 src = instr_regarray_src_hbo(t, ip);
3267 regarray[idx] += src;
3271__instr_regadd_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3273 uint64_t *regarray, idx, src;
3275 TRACE(
"[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3277 regarray = instr_regarray_regarray(p, ip);
3278 idx = instr_regarray_idx_hbo(p, t, ip);
3279 src = instr_regarray_src_nbo(t, ip);
3280 regarray[idx] += src;
3284__instr_regadd_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3286 uint64_t *regarray, idx, src;
3288 TRACE(
"[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3290 regarray = instr_regarray_regarray(p, ip);
3291 idx = instr_regarray_idx_hbo(p, t, ip);
3292 src = instr_regarray_src_hbo(t, ip);
3293 regarray[idx] += src;
3297__instr_regadd_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3299 uint64_t *regarray, idx, src;
3301 TRACE(
"[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3303 regarray = instr_regarray_regarray(p, ip);
3304 idx = instr_regarray_idx_nbo(p, t, ip);
3305 src = ip->regarray.dstsrc_val;
3306 regarray[idx] += src;
3310__instr_regadd_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3312 uint64_t *regarray, idx, src;
3314 TRACE(
"[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3316 regarray = instr_regarray_regarray(p, ip);
3317 idx = instr_regarray_idx_hbo(p, t, ip);
3318 src = ip->regarray.dstsrc_val;
3319 regarray[idx] += src;
3323__instr_regadd_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3325 uint64_t *regarray, idx, src;
3327 TRACE(
"[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3329 regarray = instr_regarray_regarray(p, ip);
3330 idx = instr_regarray_idx_imm(p, ip);
3331 src = instr_regarray_src_nbo(t, ip);
3332 regarray[idx] += src;
3336__instr_regadd_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3338 uint64_t *regarray, idx, src;
3340 TRACE(
"[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3342 regarray = instr_regarray_regarray(p, ip);
3343 idx = instr_regarray_idx_imm(p, ip);
3344 src = instr_regarray_src_hbo(t, ip);
3345 regarray[idx] += src;
3349__instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
3351 const struct instruction *ip)
3353 uint64_t *regarray, idx, src;
3355 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3357 regarray = instr_regarray_regarray(p, ip);
3358 idx = instr_regarray_idx_imm(p, ip);
3359 src = ip->regarray.dstsrc_val;
3360 regarray[idx] += src;
3366static inline struct meter *
3367instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3369 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3371 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3372 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3373 uint64_t idx64 = *idx64_ptr;
3374 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3375 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3377 return &r->metarray[idx];
3380#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3382static inline struct meter *
3383instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3385 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3387 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3388 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3389 uint64_t idx64 = *idx64_ptr;
3390 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3392 return &r->metarray[idx];
3397#define instr_meter_idx_nbo instr_meter_idx_hbo
3401static inline struct meter *
3402instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3404 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3406 uint64_t idx = ip->meter.idx_val & r->size_mask;
3408 return &r->metarray[idx];
3411static inline uint32_t
3412instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
3414 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3415 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3416 uint64_t src64 = *src64_ptr;
3417 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3418 uint64_t src = src64 & src64_mask;
3420 return (uint32_t)src;
3423#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3425static inline uint32_t
3426instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
3428 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3429 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3430 uint64_t src64 = *src64_ptr;
3431 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3433 return (uint32_t)src;
3438#define instr_meter_length_nbo instr_meter_length_hbo
3443instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
3445 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3446 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3447 uint64_t src64 = *src64_ptr;
3448 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3449 uint64_t src = src64 & src64_mask;
3455instr_meter_color_out_hbo_set(
struct thread *t,
3456 const struct instruction *ip,
3459 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3460 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
3461 uint64_t dst64 = *dst64_ptr;
3462 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
3464 uint64_t src = (uint64_t)color_out;
3466 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3470__instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
3472 const struct instruction *ip)
3476 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
3478 m = instr_meter_idx_nbo(p, t, ip);
3483__instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
3485 const struct instruction *ip)
3489 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
3491 m = instr_meter_idx_hbo(p, t, ip);
3496__instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
3498 const struct instruction *ip)
3502 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
3504 m = instr_meter_idx_imm(p, ip);
3509__instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3512 uint64_t time, n_pkts, n_bytes;
3516 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
3518 m = instr_meter_idx_nbo(p, t, ip);
3521 length = instr_meter_length_nbo(t, ip);
3522 color_in = instr_meter_color_in_hbo(t, ip);
3525 &m->profile->profile,
3530 color_out &= m->color_mask;
3532 n_pkts = m->n_pkts[color_out];
3533 n_bytes = m->n_bytes[color_out];
3535 instr_meter_color_out_hbo_set(t, ip, color_out);
3537 m->n_pkts[color_out] = n_pkts + 1;
3538 m->n_bytes[color_out] = n_bytes + length;
3542__instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3545 uint64_t time, n_pkts, n_bytes;
3549 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
3551 m = instr_meter_idx_nbo(p, t, ip);
3554 length = instr_meter_length_nbo(t, ip);
3555 color_in = (
enum rte_color)ip->meter.color_in_val;
3558 &m->profile->profile,
3563 color_out &= m->color_mask;
3565 n_pkts = m->n_pkts[color_out];
3566 n_bytes = m->n_bytes[color_out];
3568 instr_meter_color_out_hbo_set(t, ip, color_out);
3570 m->n_pkts[color_out] = n_pkts + 1;
3571 m->n_bytes[color_out] = n_bytes + length;
3575__instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3578 uint64_t time, n_pkts, n_bytes;
3582 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
3584 m = instr_meter_idx_nbo(p, t, ip);
3587 length = instr_meter_length_hbo(t, ip);
3588 color_in = instr_meter_color_in_hbo(t, ip);
3591 &m->profile->profile,
3596 color_out &= m->color_mask;
3598 n_pkts = m->n_pkts[color_out];
3599 n_bytes = m->n_bytes[color_out];
3601 instr_meter_color_out_hbo_set(t, ip, color_out);
3603 m->n_pkts[color_out] = n_pkts + 1;
3604 m->n_bytes[color_out] = n_bytes + length;
3608__instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3611 uint64_t time, n_pkts, n_bytes;
3615 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
3617 m = instr_meter_idx_nbo(p, t, ip);
3620 length = instr_meter_length_hbo(t, ip);
3621 color_in = (
enum rte_color)ip->meter.color_in_val;
3624 &m->profile->profile,
3629 color_out &= m->color_mask;
3631 n_pkts = m->n_pkts[color_out];
3632 n_bytes = m->n_bytes[color_out];
3634 instr_meter_color_out_hbo_set(t, ip, color_out);
3636 m->n_pkts[color_out] = n_pkts + 1;
3637 m->n_bytes[color_out] = n_bytes + length;
3641__instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3644 uint64_t time, n_pkts, n_bytes;
3648 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
3650 m = instr_meter_idx_hbo(p, t, ip);
3653 length = instr_meter_length_nbo(t, ip);
3654 color_in = instr_meter_color_in_hbo(t, ip);
3657 &m->profile->profile,
3662 color_out &= m->color_mask;
3664 n_pkts = m->n_pkts[color_out];
3665 n_bytes = m->n_bytes[color_out];
3667 instr_meter_color_out_hbo_set(t, ip, color_out);
3669 m->n_pkts[color_out] = n_pkts + 1;
3670 m->n_bytes[color_out] = n_bytes + length;
3674__instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3677 uint64_t time, n_pkts, n_bytes;
3681 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
3683 m = instr_meter_idx_hbo(p, t, ip);
3686 length = instr_meter_length_nbo(t, ip);
3687 color_in = (
enum rte_color)ip->meter.color_in_val;
3690 &m->profile->profile,
3695 color_out &= m->color_mask;
3697 n_pkts = m->n_pkts[color_out];
3698 n_bytes = m->n_bytes[color_out];
3700 instr_meter_color_out_hbo_set(t, ip, color_out);
3702 m->n_pkts[color_out] = n_pkts + 1;
3703 m->n_bytes[color_out] = n_bytes + length;
3707__instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3710 uint64_t time, n_pkts, n_bytes;
3714 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
3716 m = instr_meter_idx_hbo(p, t, ip);
3719 length = instr_meter_length_hbo(t, ip);
3720 color_in = instr_meter_color_in_hbo(t, ip);
3723 &m->profile->profile,
3728 color_out &= m->color_mask;
3730 n_pkts = m->n_pkts[color_out];
3731 n_bytes = m->n_bytes[color_out];
3733 instr_meter_color_out_hbo_set(t, ip, color_out);
3735 m->n_pkts[color_out] = n_pkts + 1;
3736 m->n_bytes[color_out] = n_bytes + length;
3740__instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3743 uint64_t time, n_pkts, n_bytes;
3747 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
3749 m = instr_meter_idx_hbo(p, t, ip);
3752 length = instr_meter_length_hbo(t, ip);
3753 color_in = (
enum rte_color)ip->meter.color_in_val;
3756 &m->profile->profile,
3761 color_out &= m->color_mask;
3763 n_pkts = m->n_pkts[color_out];
3764 n_bytes = m->n_bytes[color_out];
3766 instr_meter_color_out_hbo_set(t, ip, color_out);
3768 m->n_pkts[color_out] = n_pkts + 1;
3769 m->n_bytes[color_out] = n_bytes + length;
3773__instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3776 uint64_t time, n_pkts, n_bytes;
3780 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
3782 m = instr_meter_idx_imm(p, ip);
3785 length = instr_meter_length_nbo(t, ip);
3786 color_in = instr_meter_color_in_hbo(t, ip);
3789 &m->profile->profile,
3794 color_out &= m->color_mask;
3796 n_pkts = m->n_pkts[color_out];
3797 n_bytes = m->n_bytes[color_out];
3799 instr_meter_color_out_hbo_set(t, ip, color_out);
3801 m->n_pkts[color_out] = n_pkts + 1;
3802 m->n_bytes[color_out] = n_bytes + length;
3806__instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3809 uint64_t time, n_pkts, n_bytes;
3813 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
3815 m = instr_meter_idx_imm(p, ip);
3818 length = instr_meter_length_nbo(t, ip);
3819 color_in = (
enum rte_color)ip->meter.color_in_val;
3822 &m->profile->profile,
3827 color_out &= m->color_mask;
3829 n_pkts = m->n_pkts[color_out];
3830 n_bytes = m->n_bytes[color_out];
3832 instr_meter_color_out_hbo_set(t, ip, color_out);
3834 m->n_pkts[color_out] = n_pkts + 1;
3835 m->n_bytes[color_out] = n_bytes + length;
3839__instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3842 uint64_t time, n_pkts, n_bytes;
3846 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
3848 m = instr_meter_idx_imm(p, ip);
3851 length = instr_meter_length_hbo(t, ip);
3852 color_in = instr_meter_color_in_hbo(t, ip);
3855 &m->profile->profile,
3860 color_out &= m->color_mask;
3862 n_pkts = m->n_pkts[color_out];
3863 n_bytes = m->n_bytes[color_out];
3865 instr_meter_color_out_hbo_set(t, ip, color_out);
3867 m->n_pkts[color_out] = n_pkts + 1;
3868 m->n_bytes[color_out] = n_bytes + length;
3872__instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3875 uint64_t time, n_pkts, n_bytes;
3879 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
3881 m = instr_meter_idx_imm(p, ip);
3884 length = instr_meter_length_hbo(t, ip);
3885 color_in = (
enum rte_color)ip->meter.color_in_val;
3888 &m->profile->profile,
3893 color_out &= m->color_mask;
3895 n_pkts = m->n_pkts[color_out];
3896 n_bytes = m->n_bytes[color_out];
3898 instr_meter_color_out_hbo_set(t, ip, color_out);
3900 m->n_pkts[color_out] = n_pkts + 1;
3901 m->n_bytes[color_out] = n_bytes + length;
static uint64_t rte_get_tsc_cycles(void)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
static void rte_prefetch0(const volatile void *p)
void(* rte_swx_extern_type_destructor_t)(void *object)
int(* rte_swx_extern_func_t)(void *mailbox)
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
#define RTE_SWX_NAME_SIZE
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_port_out_flush_t)(void *port)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, int *hit)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)