4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__
/* Pipeline trace messages: printf-style, emitted to stdout. */
#define TRACE(...) printf(__VA_ARGS__)
/* 64-bit network (big-endian) <-> host byte order conversion. */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)
50 TAILQ_ENTRY(struct_type) node;
59 TAILQ_HEAD(struct_type_tailq, struct_type);
65 TAILQ_ENTRY(port_in_type) node;
70 TAILQ_HEAD(port_in_type_tailq, port_in_type);
73 TAILQ_ENTRY(port_in) node;
74 struct port_in_type *type;
79 TAILQ_HEAD(port_in_tailq, port_in);
81 struct port_in_runtime {
89 struct port_out_type {
90 TAILQ_ENTRY(port_out_type) node;
95 TAILQ_HEAD(port_out_type_tailq, port_out_type);
98 TAILQ_ENTRY(port_out) node;
99 struct port_out_type *type;
104 TAILQ_HEAD(port_out_tailq, port_out);
106 struct port_out_runtime {
117 struct mirroring_session {
120 uint32_t truncation_length;
126 struct extern_type_member_func {
127 TAILQ_ENTRY(extern_type_member_func) node;
133 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
136 TAILQ_ENTRY(extern_type) node;
138 struct struct_type *mailbox_struct_type;
141 struct extern_type_member_func_tailq funcs;
145 TAILQ_HEAD(extern_type_tailq, extern_type);
148 TAILQ_ENTRY(extern_obj) node;
150 struct extern_type *type;
156 TAILQ_HEAD(extern_obj_tailq, extern_obj);
/* Maximum number of member functions per extern type (build-time override). */
#ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
#define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
#endif
162 struct extern_obj_runtime {
172 TAILQ_ENTRY(extern_func) node;
174 struct struct_type *mailbox_struct_type;
180 TAILQ_HEAD(extern_func_tailq, extern_func);
182 struct extern_func_runtime {
191 TAILQ_ENTRY(hash_func) node;
197 TAILQ_HEAD(hash_func_tailq, hash_func);
199 struct hash_func_runtime {
207 TAILQ_ENTRY(rss) node;
212 TAILQ_HEAD(rss_tailq, rss);
223 TAILQ_ENTRY(header) node;
225 struct struct_type *st;
230 TAILQ_HEAD(header_tailq, header);
232 struct header_runtime {
237 struct header_out_runtime {
263 enum instruction_type {
321 INSTR_HDR_INVALIDATE,
374 INSTR_ALU_CKADD_FIELD,
375 INSTR_ALU_CKADD_STRUCT20,
376 INSTR_ALU_CKADD_STRUCT,
382 INSTR_ALU_CKSUB_FIELD,
440 INSTR_REGPREFETCH_RH,
441 INSTR_REGPREFETCH_RM,
442 INSTR_REGPREFETCH_RI,
520 INSTR_LEARNER_REARM_NEW,
523 INSTR_LEARNER_FORGET,
582 INSTR_JMP_ACTION_HIT,
587 INSTR_JMP_ACTION_MISS,
640 struct instr_operand {
661 uint8_t header_id[8];
662 uint8_t struct_id[8];
667 struct instr_hdr_validity {
678 uint8_t mf_first_arg_offset;
679 uint8_t mf_timeout_id_offset;
680 uint8_t mf_timeout_id_n_bits;
683 struct instr_extern_obj {
688 struct instr_extern_func {
692 struct instr_hash_func {
693 uint8_t hash_func_id;
722 struct instr_dst_src {
723 struct instr_operand dst;
725 struct instr_operand src;
730 struct instr_regarray {
735 struct instr_operand idx;
740 struct instr_operand dstsrc;
750 struct instr_operand idx;
754 struct instr_operand length;
757 struct instr_operand color_in;
758 uint32_t color_in_val;
761 struct instr_operand color_out;
766 uint8_t header_id[8];
767 uint8_t struct_id[8];
778 struct instruction *ip;
781 struct instr_operand a;
787 struct instr_operand b;
793 enum instruction_type type;
796 struct instr_dst_src mirror;
797 struct instr_hdr_validity valid;
798 struct instr_dst_src mov;
799 struct instr_regarray regarray;
800 struct instr_meter meter;
801 struct instr_dma dma;
802 struct instr_dst_src alu;
803 struct instr_table table;
804 struct instr_learn learn;
805 struct instr_extern_obj ext_obj;
806 struct instr_extern_func ext_func;
807 struct instr_hash_func hash_func;
808 struct instr_rss rss;
809 struct instr_jmp jmp;
813 struct instruction_data {
820 typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
826 (*action_func_t)(
struct rte_swx_pipeline *p);
829 TAILQ_ENTRY(action) node;
831 struct struct_type *st;
832 int *args_endianness;
833 struct instruction *instructions;
834 struct instruction_data *instruction_data;
835 uint32_t n_instructions;
839 TAILQ_HEAD(action_tailq, action);
845 TAILQ_ENTRY(table_type) node;
851 TAILQ_HEAD(table_type_tailq, table_type);
859 TAILQ_ENTRY(table) node;
862 struct table_type *type;
865 struct match_field *fields;
867 struct header *header;
870 struct action **actions;
871 struct action *default_action;
872 uint8_t *default_action_data;
874 int default_action_is_const;
875 uint32_t action_data_size_max;
876 int *action_is_for_table_entries;
877 int *action_is_for_default_entry;
879 struct hash_func *hf;
884 TAILQ_HEAD(table_tailq, table);
886 struct table_runtime {
892 struct table_statistics {
893 uint64_t n_pkts_hit[2];
894 uint64_t *n_pkts_action;
901 TAILQ_ENTRY(selector) node;
904 struct field *group_id_field;
905 struct field **selector_fields;
906 uint32_t n_selector_fields;
907 struct header *selector_header;
908 struct field *member_id_field;
910 uint32_t n_groups_max;
911 uint32_t n_members_per_group_max;
916 TAILQ_HEAD(selector_tailq, selector);
918 struct selector_runtime {
920 uint8_t **group_id_buffer;
921 uint8_t **selector_buffer;
922 uint8_t **member_id_buffer;
925 struct selector_statistics {
933 TAILQ_ENTRY(learner) node;
937 struct field **fields;
939 struct header *header;
942 struct action **actions;
943 struct action *default_action;
944 uint8_t *default_action_data;
946 int default_action_is_const;
947 uint32_t action_data_size_max;
948 int *action_is_for_table_entries;
949 int *action_is_for_default_entry;
951 struct hash_func *hf;
958 TAILQ_HEAD(learner_tailq, learner);
960 struct learner_runtime {
965 struct learner_statistics {
966 uint64_t n_pkts_hit[2];
967 uint64_t n_pkts_learn[2];
968 uint64_t n_pkts_rearm;
969 uint64_t n_pkts_forget;
970 uint64_t *n_pkts_action;
977 TAILQ_ENTRY(regarray) node;
984 TAILQ_HEAD(regarray_tailq, regarray);
986 struct regarray_runtime {
994 struct meter_profile {
995 TAILQ_ENTRY(meter_profile) node;
998 struct rte_meter_trtcm_profile profile;
1002 TAILQ_HEAD(meter_profile_tailq, meter_profile);
1005 TAILQ_ENTRY(metarray) node;
1011 TAILQ_HEAD(metarray_tailq, metarray);
1015 struct meter_profile *profile;
1023 struct metarray_runtime {
1024 struct meter *metarray;
1035 uint32_t *mirroring_slots;
1036 uint64_t mirroring_slots_mask;
1038 uint32_t recirc_pass_id;
1044 struct header_runtime *headers;
1045 struct header_out_runtime *headers_out;
1046 uint8_t *header_storage;
1047 uint8_t *header_out_storage;
1048 uint64_t valid_headers;
1049 uint32_t n_headers_out;
1055 struct table_runtime *tables;
1056 struct selector_runtime *selectors;
1057 struct learner_runtime *learners;
1062 uint32_t learner_id;
1066 struct extern_obj_runtime *extern_objs;
1067 struct extern_func_runtime *extern_funcs;
1070 struct instruction *ip;
1071 struct instruction *ret;
/* 64-bit mask helpers: test / set / clear the bit at position pos (0..63).
 * GET yields the isolated bit (non-zero when set), not a 0/1 value.
 */
#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
/* Non-zero when header header_id is currently valid for the given thread. */
#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)
1081 static inline uint64_t
1082 instr_operand_hbo(
struct thread *t,
const struct instr_operand *x)
1084 uint8_t *x_struct = t->structs[x->struct_id];
1085 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1086 uint64_t x64 = *x64_ptr;
1087 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
1089 return x64 & x64_mask;
1092 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1094 static inline uint64_t
1095 instr_operand_nbo(
struct thread *t,
const struct instr_operand *x)
1097 uint8_t *x_struct = t->structs[x->struct_id];
1098 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1099 uint64_t x64 = *x64_ptr;
1101 return ntoh64(x64) >> (64 - x->n_bits);
/* Big-endian host: network byte order equals host byte order. */
#define instr_operand_nbo instr_operand_hbo
/* dst = dst <operator> src, both operands in host byte order (metadata /
 * action-data fields). Each operand is loaded as a 64-bit word and masked to
 * its n_bits size; the result is merged back into the destination word
 * without disturbing neighboring bits.
 */
#define ALU(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	uint64_t result = dst operator src; \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}
1129 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* ALU with host-byte-order destination and network-byte-order source
 * (metadata <op>= header field): the source is byte-swapped and right-aligned
 * before the operation.
 */
#define ALU_MH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
	uint64_t result = dst operator src; \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}
/* ALU with network-byte-order destination and host-byte-order source
 * (header field <op>= metadata): destination is swapped to host order for the
 * operation, and the result is swapped back and left-aligned before the
 * merge.
 */
#define ALU_HM(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
/* Fast ALU_HM variant: instead of swapping the destination, the source is
 * pre-swapped into the destination's network-byte-order layout, so the result
 * can be merged directly. Presumably only valid for operators where this is
 * equivalent (e.g. and/or/xor) -- verify against the instruction translator.
 */
#define ALU_HM_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t result = dst operator src; \
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
/* ALU with both operands in network byte order (header field <op>= header
 * field): both are swapped to host order, and the result is swapped back and
 * left-aligned before the merge.
 */
#define ALU_HH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
/* Fast ALU_HH variant: shifts re-align the source to the destination field's
 * layout with no byte swaps. Presumably only valid for byte-order-insensitive
 * operators -- verify against the instruction translator.
 */
#define ALU_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t result = dst operator src; \
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
/* Big-endian host: no byte swapping needed, fast variants reduce to ALU. */
#define ALU_HM_FAST ALU
#define ALU_HH_FAST ALU
/* ALU with host-byte-order destination and immediate source (src_val taken
 * from the instruction itself).
 */
#define ALU_I(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
	uint64_t src = (ip)->alu.src_val; \
	uint64_t result = dst operator src; \
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}
/* Metadata destination with immediate source: same as the generic immediate form. */
#define ALU_MI ALU_I
1252 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* ALU with network-byte-order destination and immediate source: destination
 * is swapped to host order, result swapped back and left-aligned.
 */
#define ALU_HI(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t src = (ip)->alu.src_val; \
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}
/* Big-endian host: header destination needs no byte swapping. */
#define ALU_HI ALU_I
/* dst = src for two host-byte-order operands: the source is masked to its
 * n_bits size and merged into the destination field.
 */
#define MOV(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
1292 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* dst(metadata, host order) = src(header, network order): source is
 * byte-swapped and right-aligned before the merge.
 */
#define MOV_MH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
/* dst(header, network order) = src(metadata, host order): source is masked,
 * byte-swapped and left-aligned into the destination field's layout.
 */
#define MOV_HM(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}
/* dst(header) = src(header): both fields share the network byte order, so a
 * pair of shifts re-aligns the source bits to the destination field -- no
 * byte swap needed.
 */
#define MOV_HH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
	src = src >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}
/* dst = immediate src_val taken from the instruction, masked to the
 * destination field size.
 */
#define MOV_I(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
	uint64_t src = (ip)->mov.src_val; \
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
1362 #define JMP_CMP(thread, ip, operator) \
1364 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1365 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1366 uint64_t a64 = *a64_ptr; \
1367 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1368 uint64_t a = a64 & a64_mask; \
1370 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1371 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1372 uint64_t b64 = *b64_ptr; \
1373 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
1374 uint64_t b = b64 & b64_mask; \
1376 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
1379 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Conditional jump: host-byte-order a vs network-byte-order b (b is
 * byte-swapped and right-aligned before the comparison).
 */
#define JMP_CMP_MH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
/* Conditional jump: network-byte-order a vs host-byte-order b. */
#define JMP_CMP_HM(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
/* Conditional jump: both operands in network byte order, each byte-swapped
 * and right-aligned before the comparison.
 */
#define JMP_CMP_HH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
1428 #define JMP_CMP_HH_FAST(thread, ip, operator) \
1430 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1431 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1432 uint64_t a64 = *a64_ptr; \
1433 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
1435 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
1436 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
1437 uint64_t b64 = *b64_ptr; \
1438 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
1440 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Big-endian host: all comparison flavors reduce to the generic one. */
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP
#define JMP_CMP_HH_FAST JMP_CMP
1452 #define JMP_CMP_I(thread, ip, operator) \
1454 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
1455 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
1456 uint64_t a64 = *a64_ptr; \
1457 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
1458 uint64_t a = a64 & a64_mask; \
1460 uint64_t b = (ip)->jmp.b_val; \
1462 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
/* Metadata operand vs immediate: same as the generic immediate comparison. */
#define JMP_CMP_MI JMP_CMP_I
1467 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Conditional jump: network-byte-order operand vs immediate (operand is
 * byte-swapped and right-aligned first).
 */
#define JMP_CMP_HI(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t b = (ip)->jmp.b_val; \
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}
/* Big-endian host: header operand needs no byte swapping. */
#define JMP_CMP_HI JMP_CMP_I
/* Read an n_bits-wide metadata field at the given byte offset; evaluates to
 * the masked value (GNU statement expression). Usage at the rx/tx call sites
 * shows it yields a value; the exact tail expression was reconstructed --
 * NOTE(review): confirm against the upstream source.
 */
#define METADATA_READ(thread, offset, n_bits) \
({ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	(m64 & m64_mask); \
})
/* Write value into an n_bits-wide metadata field at the given byte offset,
 * preserving the neighboring bits of the containing 64-bit word.
 */
#define METADATA_WRITE(thread, offset, n_bits, value) \
{ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	uint64_t m_new = value; \
	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
}
/* Number of pipeline threads (build-time override). Must be a power of 2:
 * thread_yield() wraps the thread ID with (RTE_SWX_PIPELINE_THREADS_MAX - 1).
 */
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

/* Maximum size of the pipeline instruction table (build-time override). */
#ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX
#define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1024
#endif
1514 struct rte_swx_pipeline {
1517 struct struct_type_tailq struct_types;
1518 struct port_in_type_tailq port_in_types;
1519 struct port_in_tailq ports_in;
1520 struct port_out_type_tailq port_out_types;
1521 struct port_out_tailq ports_out;
1522 struct extern_type_tailq extern_types;
1523 struct extern_obj_tailq extern_objs;
1524 struct extern_func_tailq extern_funcs;
1525 struct hash_func_tailq hash_funcs;
1526 struct rss_tailq rss;
1527 struct header_tailq headers;
1528 struct struct_type *metadata_st;
1529 uint32_t metadata_struct_id;
1530 struct action_tailq actions;
1531 struct table_type_tailq table_types;
1532 struct table_tailq tables;
1533 struct selector_tailq selectors;
1534 struct learner_tailq learners;
1535 struct regarray_tailq regarrays;
1536 struct meter_profile_tailq meter_profiles;
1537 struct metarray_tailq metarrays;
1539 struct port_in_runtime *in;
1540 struct port_out_runtime *out;
1541 struct mirroring_session *mirroring_sessions;
1542 struct instruction **action_instructions;
1543 action_func_t *action_funcs;
1545 struct table_statistics *table_stats;
1546 struct selector_statistics *selector_stats;
1547 struct learner_statistics *learner_stats;
1548 struct hash_func_runtime *hash_func_runtime;
1549 struct rss_runtime **rss_runtime;
1550 struct regarray_runtime *regarray_runtime;
1551 struct metarray_runtime *metarray_runtime;
1552 struct instruction *instructions;
1553 struct instruction_data *instruction_data;
1554 instr_exec_t *instruction_table;
1555 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1559 uint32_t n_ports_in;
1560 uint32_t n_ports_out;
1561 uint32_t n_mirroring_slots;
1562 uint32_t n_mirroring_sessions;
1563 uint32_t n_extern_objs;
1564 uint32_t n_extern_funcs;
1565 uint32_t n_hash_funcs;
1569 uint32_t n_selectors;
1570 uint32_t n_learners;
1571 uint32_t n_regarrays;
1572 uint32_t n_metarrays;
1576 uint32_t n_instructions;
1585 pipeline_port_inc(
struct rte_swx_pipeline *p)
1587 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1591 thread_ip_reset(
struct rte_swx_pipeline *p,
struct thread *t)
1593 t->ip = p->instructions;
1597 thread_ip_set(
struct thread *t,
struct instruction *ip)
1603 thread_ip_action_call(
struct rte_swx_pipeline *p,
1608 t->ip = p->action_instructions[action_id];
1612 thread_ip_inc(
struct rte_swx_pipeline *p);
1615 thread_ip_inc(
struct rte_swx_pipeline *p)
1617 struct thread *t = &p->threads[p->thread_id];
1623 thread_ip_inc_cond(
struct thread *t,
int cond)
1629 thread_yield(
struct rte_swx_pipeline *p)
1631 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1635 thread_yield_cond(
struct rte_swx_pipeline *p,
int cond)
1637 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1644 __instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1646 struct port_in_runtime *port = &p->in[p->port_id];
1651 if (t->recirculate) {
1652 TRACE(
"[Thread %2u] rx - recirculate (pass %u)\n",
1654 t->recirc_pass_id + 1);
1657 t->ptr = &
pkt->pkt[
pkt->offset];
1658 t->mirroring_slots_mask = 0;
1660 t->recirc_pass_id++;
1663 t->valid_headers = 0;
1664 t->n_headers_out = 0;
1667 t->table_state = p->table_state;
1673 pkt_received = port->pkt_rx(port->obj,
pkt);
1674 t->ptr = &
pkt->pkt[
pkt->offset];
1677 TRACE(
"[Thread %2u] rx %s from port %u\n",
1679 pkt_received ?
"1 pkt" :
"0 pkts",
1682 t->mirroring_slots_mask = 0;
1683 t->recirc_pass_id = 0;
1686 t->valid_headers = 0;
1687 t->n_headers_out = 0;
1690 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1693 t->table_state = p->table_state;
1696 pipeline_port_inc(p);
1698 return pkt_received;
1702 instr_rx_exec(
struct rte_swx_pipeline *p)
1704 struct thread *t = &p->threads[p->thread_id];
1705 struct instruction *ip = t->ip;
1709 pkt_received = __instr_rx_exec(p, t, ip);
1712 thread_ip_inc_cond(t, pkt_received);
1720 emit_handler(
struct thread *t)
1722 struct header_out_runtime *h0 = &t->headers_out[0];
1723 struct header_out_runtime *h1 = &t->headers_out[1];
1724 uint32_t offset = 0, i;
1727 if ((t->n_headers_out == 1) &&
1728 (h0->ptr + h0->n_bytes == t->ptr)) {
1729 TRACE(
"Emit handler: no header change or header decap.\n");
1731 t->pkt.offset -= h0->n_bytes;
1732 t->pkt.length += h0->n_bytes;
1738 if ((t->n_headers_out == 2) &&
1739 (h1->ptr + h1->n_bytes == t->ptr) &&
1740 (h0->ptr == h0->ptr0)) {
1743 TRACE(
"Emit handler: header encapsulation.\n");
1745 offset = h0->n_bytes + h1->n_bytes;
1746 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1747 t->pkt.offset -= offset;
1748 t->pkt.length += offset;
1754 TRACE(
"Emit handler: complex case.\n");
1756 for (i = 0; i < t->n_headers_out; i++) {
1757 struct header_out_runtime *h = &t->headers_out[i];
1759 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1760 offset += h->n_bytes;
1764 memcpy(t->ptr - offset, t->header_out_storage, offset);
1765 t->pkt.offset -= offset;
1766 t->pkt.length += offset;
1771 mirroring_handler(
struct rte_swx_pipeline *p,
struct thread *t,
struct rte_swx_pkt *pkt)
1773 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1776 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1777 if (slot_mask & slots_mask) {
1778 struct port_out_runtime *port;
1779 struct mirroring_session *session;
1780 uint32_t port_id, session_id;
1782 session_id = t->mirroring_slots[slot_id];
1783 session = &p->mirroring_sessions[session_id];
1785 port_id = session->port_id;
1786 port = &p->out[port_id];
1788 if (session->fast_clone)
1789 port->pkt_fast_clone_tx(port->obj, pkt);
1791 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
1793 slots_mask &= ~slot_mask;
1798 __instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1800 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1801 struct port_out_runtime *port = &p->out[port_id];
1805 if (t->recirculate) {
1806 TRACE(
"[Thread %2u]: tx 1 pkt - recirculate\n",
1813 mirroring_handler(p, t,
pkt);
1818 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1826 mirroring_handler(p, t,
pkt);
1827 port->pkt_tx(port->obj,
pkt);
1831 __instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1833 uint64_t port_id = ip->io.io.val;
1834 struct port_out_runtime *port = &p->out[port_id];
1838 if (t->recirculate) {
1839 TRACE(
"[Thread %2u]: tx (i) 1 pkt - recirculate\n",
1846 mirroring_handler(p, t,
pkt);
1851 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1859 mirroring_handler(p, t,
pkt);
1860 port->pkt_tx(port->obj,
pkt);
1864 __instr_drop_exec(
struct rte_swx_pipeline *p,
1868 uint64_t port_id = p->n_ports_out - 1;
1869 struct port_out_runtime *port = &p->out[port_id];
1872 TRACE(
"[Thread %2u]: drop 1 pkt\n",
1879 mirroring_handler(p, t,
pkt);
1880 port->pkt_tx(port->obj,
pkt);
1884 __instr_mirror_exec(
struct rte_swx_pipeline *p,
1886 const struct instruction *ip)
1888 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1889 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
1891 slot_id &= p->n_mirroring_slots - 1;
1892 session_id &= p->n_mirroring_sessions - 1;
1894 TRACE(
"[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1897 (uint32_t)session_id);
1899 t->mirroring_slots[slot_id] = session_id;
1900 t->mirroring_slots_mask |= 1LLU << slot_id;
1904 __instr_recirculate_exec(
struct rte_swx_pipeline *p
__rte_unused,
1908 TRACE(
"[Thread %2u]: recirculate\n",
1915 __instr_recircid_exec(
struct rte_swx_pipeline *p
__rte_unused,
1917 const struct instruction *ip)
1919 TRACE(
"[Thread %2u]: recircid (pass %u)\n",
1924 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
1931 __instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
1933 const struct instruction *ip,
1936 uint64_t valid_headers = t->valid_headers;
1937 uint8_t *ptr = t->ptr;
1938 uint32_t
offset = t->pkt.offset;
1939 uint32_t
length = t->pkt.length;
1942 for (i = 0; i < n_extract; i++) {
1943 uint32_t header_id = ip->io.hdr.header_id[i];
1944 uint32_t struct_id = ip->io.hdr.struct_id[i];
1945 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1947 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
1953 t->structs[struct_id] = ptr;
1954 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1963 t->valid_headers = valid_headers;
/* Extract a single header from the packet. */
static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p,
			 struct thread *t,
			 const struct instruction *ip)
{
	__instr_hdr_extract_many_exec(p, t, ip, 1);
}
1980 __instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
1982 const struct instruction *ip)
1984 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1986 __instr_hdr_extract_many_exec(p, t, ip, 2);
1990 __instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
1992 const struct instruction *ip)
1994 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1996 __instr_hdr_extract_many_exec(p, t, ip, 3);
2000 __instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
2002 const struct instruction *ip)
2004 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2006 __instr_hdr_extract_many_exec(p, t, ip, 4);
2010 __instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
2012 const struct instruction *ip)
2014 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2016 __instr_hdr_extract_many_exec(p, t, ip, 5);
2020 __instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
2022 const struct instruction *ip)
2024 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2026 __instr_hdr_extract_many_exec(p, t, ip, 6);
2030 __instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
2032 const struct instruction *ip)
2034 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2036 __instr_hdr_extract_many_exec(p, t, ip, 7);
2040 __instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
2042 const struct instruction *ip)
2044 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2046 __instr_hdr_extract_many_exec(p, t, ip, 8);
2050 __instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p
__rte_unused,
2052 const struct instruction *ip)
2054 uint64_t valid_headers = t->valid_headers;
2055 uint8_t *ptr = t->ptr;
2056 uint32_t
offset = t->pkt.offset;
2057 uint32_t
length = t->pkt.length;
2059 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2060 uint32_t header_id = ip->io.hdr.header_id[0];
2061 uint32_t struct_id = ip->io.hdr.struct_id[0];
2062 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
2064 struct header_runtime *h = &t->headers[header_id];
2066 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
2072 n_bytes += n_bytes_last;
2075 t->structs[struct_id] = ptr;
2076 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2077 h->n_bytes = n_bytes;
2080 t->pkt.offset = offset + n_bytes;
2081 t->pkt.length = length - n_bytes;
2082 t->ptr = ptr + n_bytes;
/*
 * Header lookahead: map the header struct onto the current packet pointer
 * and mark it valid WITHOUT advancing the packet offset/length/pointer
 * (contrast with the extract variants above, which consume packet bytes).
 * NOTE(review): partial extract — braces and some lines are missing here.
 */
2086 __instr_hdr_lookahead_exec(
struct rte_swx_pipeline *p
__rte_unused,
2088 const struct instruction *ip)
2090 uint64_t valid_headers = t->valid_headers;
2091 uint8_t *ptr = t->ptr;
2093 uint32_t header_id = ip->io.hdr.header_id[0];
2094 uint32_t struct_id = ip->io.hdr.struct_id[0];
2096 TRACE(
"[Thread %2u]: lookahead header %u\n",
/* No packet cursor update: lookahead only aliases the bytes. */
2101 t->structs[struct_id] = ptr;
2102 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Emit n_emit headers into the thread's headers_out[] list. Adjacent headers
 * whose packet bytes are contiguous (ho_ptr + ho_nbytes == hi_ptr) are
 * coalesced into a single output entry so TX can copy them in one go.
 * Invalid headers are traced and handled separately from valid ones.
 * NOTE(review): partial extract — braces and several lines (e.g. the branch
 * bodies around lines 2145-2180) are missing here.
 */
2109 __instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
2111 const struct instruction *ip,
2114 uint64_t valid_headers = t->valid_headers;
2115 uint32_t n_headers_out = t->n_headers_out;
/* Current (last) output header entry being built/extended. */
2116 struct header_out_runtime *ho = NULL;
2117 uint8_t *ho_ptr = NULL;
2118 uint32_t ho_nbytes = 0, i;
2120 for (i = 0; i < n_emit; i++) {
2121 uint32_t header_id = ip->io.hdr.header_id[i];
2122 uint32_t struct_id = ip->io.hdr.struct_id[i];
2124 struct header_runtime *hi = &t->headers[header_id];
2125 uint8_t *hi_ptr0 = hi->ptr0;
2126 uint32_t n_bytes = hi->n_bytes;
2128 uint8_t *hi_ptr = t->structs[struct_id];
2130 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2131 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
2138 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
/* First emitted header: start a fresh output entry. */
2144 if (!n_headers_out) {
2145 ho = &t->headers_out[0];
2151 ho_nbytes = n_bytes;
/* Otherwise continue from the last output entry. */
2157 ho = &t->headers_out[n_headers_out - 1];
2160 ho_nbytes = ho->n_bytes;
/* Contiguous with the previous entry: coalesce. */
2164 if (ho_ptr + ho_nbytes == hi_ptr) {
2165 ho_nbytes += n_bytes;
2167 ho->n_bytes = ho_nbytes;
2174 ho_nbytes = n_bytes;
/* Commit the accumulated size and entry count back to the thread. */
2181 ho->n_bytes = ho_nbytes;
2182 t->n_headers_out = n_headers_out;
/*
 * Single-header emit plus the fused emit+tx wrappers. __instr_hdr_emitN_tx_exec
 * emits N headers via __instr_hdr_emit_many_exec() and then transmits the
 * packet via __instr_tx_exec(); each wrapper TRACEs how many instructions
 * were fused (N emits + 1 tx).
 * NOTE(review): partial extract — braces and "struct thread *t" parameter
 * lines are not visible here.
 */
2186 __instr_hdr_emit_exec(
struct rte_swx_pipeline *p,
2188 const struct instruction *ip)
2190 __instr_hdr_emit_many_exec(p, t, ip, 1)
2194 __instr_hdr_emit_tx_exec(
struct rte_swx_pipeline *p,
2196 const struct instruction *ip)
2198 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2200 __instr_hdr_emit_many_exec(p, t, ip, 1);
2201 __instr_tx_exec(p, t, ip);
2205 __instr_hdr_emit2_tx_exec(
struct rte_swx_pipeline *p,
2207 const struct instruction *ip)
2209 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2211 __instr_hdr_emit_many_exec(p, t, ip, 2);
2212 __instr_tx_exec(p, t, ip);
2216 __instr_hdr_emit3_tx_exec(
struct rte_swx_pipeline *p,
2218 const struct instruction *ip)
2220 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2222 __instr_hdr_emit_many_exec(p, t, ip, 3);
2223 __instr_tx_exec(p, t, ip);
2227 __instr_hdr_emit4_tx_exec(
struct rte_swx_pipeline *p,
2229 const struct instruction *ip)
2231 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2233 __instr_hdr_emit_many_exec(p, t, ip, 4);
2234 __instr_tx_exec(p, t, ip);
2238 __instr_hdr_emit5_tx_exec(
struct rte_swx_pipeline *p,
2240 const struct instruction *ip)
2242 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2244 __instr_hdr_emit_many_exec(p, t, ip, 5);
2245 __instr_tx_exec(p, t, ip);
2249 __instr_hdr_emit6_tx_exec(
struct rte_swx_pipeline *p,
2251 const struct instruction *ip)
2253 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2255 __instr_hdr_emit_many_exec(p, t, ip, 6);
2256 __instr_tx_exec(p, t, ip);
2260 __instr_hdr_emit7_tx_exec(
struct rte_swx_pipeline *p,
2262 const struct instruction *ip)
2264 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2266 __instr_hdr_emit_many_exec(p, t, ip, 7);
2267 __instr_tx_exec(p, t, ip);
2271 __instr_hdr_emit8_tx_exec(
struct rte_swx_pipeline *p,
2273 const struct instruction *ip)
2275 TRACE(
"[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2277 __instr_hdr_emit_many_exec(p, t, ip, 8);
2278 __instr_tx_exec(p, t, ip);
/*
 * Header validate/invalidate instructions. Validate points the header struct
 * at its backing buffer (h->ptr0) and sets the header's valid bit; it is a
 * no-op when the header is already valid. Invalidate just clears the bit.
 * NOTE(review): partial extract — braces and some lines are missing here.
 */
2285 __instr_hdr_validate_exec(
struct rte_swx_pipeline *p
__rte_unused,
2287 const struct instruction *ip)
2289 uint32_t header_id = ip->valid.header_id;
2290 uint32_t struct_id = ip->valid.struct_id;
2291 uint64_t valid_headers = t->valid_headers;
2292 struct header_runtime *h = &t->headers[header_id];
2294 TRACE(
"[Thread %2u] validate header %u\n", p->thread_id, header_id);
/* Already valid: nothing to do. */
2300 if (MASK64_BIT_GET(valid_headers, header_id))
/* Back the header with its own buffer and mark it valid. */
2304 t->structs[struct_id] = h->ptr0;
2305 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2312 __instr_hdr_invalidate_exec(
struct rte_swx_pipeline *p
__rte_unused,
2314 const struct instruction *ip)
2316 uint32_t header_id = ip->valid.header_id;
2318 TRACE(
"[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
/* Only the valid bit is cleared; header contents are untouched. */
2321 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Learner table instructions: learn (add entry with action + timeout read
 * from metadata), rearm (refresh current entry's timeout), rearm_new (rearm
 * with a new timeout ID from metadata) and forget (delete current entry).
 * Each resolves the thread's current learner via t->learner_id and updates
 * the per-learner statistics counters.
 * NOTE(review): partial extract — braces, the rte_swx_table_learner_* call
 * sites and several argument lines are missing here.
 */
2328 __instr_learn_exec(
struct rte_swx_pipeline *p,
2330 const struct instruction *ip)
2332 uint64_t action_id = ip->learn.action_id;
2333 uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
/* Timeout ID is read from packet metadata at run-time. */
2334 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2335 ip->learn.mf_timeout_id_n_bits);
2336 uint32_t learner_id = t->learner_id;
2338 p->n_selectors + learner_id];
2339 struct learner_runtime *l = &t->learners[learner_id];
2340 struct learner_statistics *stats = &p->learner_stats[learner_id];
/* Action arguments start at this metadata offset. */
2348 &t->metadata[mf_first_arg_offset],
2351 TRACE(
"[Thread %2u] learner %u learn %s\n",
2354 status ?
"ok" :
"error");
/* Per-status (ok/error) learn counter. */
2356 stats->n_pkts_learn[status] += 1;
2363 __instr_rearm_exec(
struct rte_swx_pipeline *p,
2367 uint32_t learner_id = t->learner_id;
2369 p->n_selectors + learner_id];
2370 struct learner_runtime *l = &t->learners[learner_id];
2371 struct learner_statistics *stats = &p->learner_stats[learner_id];
2376 TRACE(
"[Thread %2u] learner %u rearm\n",
2380 stats->n_pkts_rearm += 1;
2384 __instr_rearm_new_exec(
struct rte_swx_pipeline *p,
2386 const struct instruction *ip)
/* Like rearm, but with a fresh timeout ID taken from metadata. */
2388 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2389 ip->learn.mf_timeout_id_n_bits);
2390 uint32_t learner_id = t->learner_id;
2392 p->n_selectors + learner_id];
2393 struct learner_runtime *l = &t->learners[learner_id];
2394 struct learner_statistics *stats = &p->learner_stats[learner_id];
2399 TRACE(
"[Thread %2u] learner %u rearm with timeout ID %u\n",
2404 stats->n_pkts_rearm += 1;
2411 __instr_forget_exec(
struct rte_swx_pipeline *p,
2415 uint32_t learner_id = t->learner_id;
2417 p->n_selectors + learner_id];
2418 struct learner_runtime *l = &t->learners[learner_id];
2419 struct learner_statistics *stats = &p->learner_stats[learner_id];
2424 TRACE(
"[Thread %2u] learner %u forget\n",
2428 stats->n_pkts_forget += 1;
/*
 * entryid: write the current table entry ID (t->entry_id) into the metadata
 * destination field encoded in the instruction.
 * extern obj/func: invoke an extern object member function or a free-standing
 * extern function with its mailbox; both return the callee's "done" status
 * (uint32_t) to the interpreter.
 * NOTE(review): partial extract — braces, the func lookup lines and return
 * statements are missing here.
 */
2435 __instr_entryid_exec(
struct rte_swx_pipeline *p
__rte_unused,
2437 const struct instruction *ip)
2439 TRACE(
"[Thread %2u]: entryid\n",
2443 METADATA_WRITE(t, ip->mov.dst.offset, ip->mov.dst.n_bits, t->entry_id);
2449 static inline uint32_t
2450 __instr_extern_obj_exec(
struct rte_swx_pipeline *p
__rte_unused,
2452 const struct instruction *ip)
2454 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2455 uint32_t func_id = ip->ext_obj.func_id;
2456 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2460 TRACE(
"[Thread %2u] extern obj %u member func %u\n",
/* Member function gets the object handle and its mailbox. */
2465 done = func(obj->obj, obj->mailbox);
2470 static inline uint32_t
2471 __instr_extern_func_exec(
struct rte_swx_pipeline *p
__rte_unused,
2473 const struct instruction *ip)
2475 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2476 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2480 TRACE(
"[Thread %2u] extern func %u\n",
/* Free-standing extern function only receives its mailbox. */
2484 done = func(ext_func->mailbox);
/*
 * hash instruction: run the configured hash function over n_src_bytes of the
 * source struct (starting at src_offset, seed 0) and store the result into
 * the metadata destination field.
 * NOTE(review): partial extract — braces and the result declaration line are
 * missing here.
 */
2493 __instr_hash_func_exec(
struct rte_swx_pipeline *p,
2495 const struct instruction *ip)
2497 uint32_t hash_func_id = ip->hash_func.hash_func_id;
2498 uint32_t dst_offset = ip->hash_func.dst.offset;
2499 uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
2500 uint32_t src_struct_id = ip->hash_func.src.struct_id;
2501 uint32_t src_offset = ip->hash_func.src.offset;
2502 uint32_t n_src_bytes = ip->hash_func.src.n_bytes;
2504 struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
2505 uint8_t *src_ptr = t->structs[src_struct_id];
2508 TRACE(
"[Thread %2u] hash %u\n",
/* Seed is fixed at 0. */
2512 result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
2513 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * rss_func(): software Toeplitz-style RSS hash. Key and input are processed
 * as 32-bit words (sizes are given in bytes and divided by 4). For each set
 * bit of each data word (d &= d - 1 clears the lowest set bit per iteration),
 * a 32-bit window of the key aligned to that bit position is XOR-folded into
 * the hash.
 * __instr_rss_exec(): run rss_func over the source struct bytes with the RSS
 * object's key and store the result in the metadata destination field.
 * NOTE(review): partial extract — braces, the "pos" computation and the
 * return statement are missing here.
 */
2519 static inline uint32_t
2520 rss_func(
void *rss_key, uint32_t rss_key_size,
void *input_data, uint32_t input_data_size)
2522 uint32_t *key = (uint32_t *)rss_key;
2523 uint32_t *data = (uint32_t *)input_data;
/* Byte sizes -> 32-bit word counts. */
2524 uint32_t key_size = rss_key_size >> 2;
2525 uint32_t data_size = input_data_size >> 2;
2526 uint32_t hash_val = 0, i;
2528 for (i = 0; i < data_size; i++) {
/* Iterate over the set bits of the data word, lowest first. */
2531 for (d = data[i]; d; d &= (d - 1)) {
2532 uint32_t key0, key1, pos;
/* 32-bit key window spanning words i and i+1, aligned to bit pos. */
2535 key0 = key[i % key_size] << (31 - pos);
2536 key1 = key[(i + 1) % key_size] >> (pos + 1);
2537 hash_val ^= key0 | key1;
2545 __instr_rss_exec(
struct rte_swx_pipeline *p,
2547 const struct instruction *ip)
2549 uint32_t rss_obj_id = ip->rss.rss_obj_id;
2550 uint32_t dst_offset = ip->rss.dst.offset;
2551 uint32_t n_dst_bits = ip->rss.dst.n_bits;
2552 uint32_t src_struct_id = ip->rss.src.struct_id;
2553 uint32_t src_offset = ip->rss.src.offset;
2554 uint32_t n_src_bytes = ip->rss.src.n_bytes;
2556 struct rss_runtime *r = p->rss_runtime[rss_obj_id];
2557 uint8_t *src_ptr = t->structs[src_struct_id];
2560 TRACE(
"[Thread %2u] rss %u\n",
2564 result = rss_func(r->key, r->key_size, &src_ptr[src_offset], n_src_bytes);
2565 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * mov instruction family. Naming convention (shared with the ALU family):
 * no suffix = metadata <- metadata, mh = metadata <- header, hm = header <-
 * metadata, hh = header <- header, i = immediate source. The dma variant
 * byte-copies between structs, zero-extending or left-truncating when the
 * destination and source sizes differ; the 128 variants copy a 16-byte field
 * as two 64-bit stores, or splat a 32-bit source into a 128-bit destination.
 * NOTE(review): partial extract — braces and the MOV*()/memset lines that
 * perform the actual copies in the plain variants are missing here.
 */
2572 __instr_mov_exec(
struct rte_swx_pipeline *p
__rte_unused,
2574 const struct instruction *ip)
2576 TRACE(
"[Thread %2u] mov\n", p->thread_id);
2582 __instr_mov_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2584 const struct instruction *ip)
2586 TRACE(
"[Thread %2u] mov (mh)\n", p->thread_id);
2592 __instr_mov_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2594 const struct instruction *ip)
2596 TRACE(
"[Thread %2u] mov (hm)\n", p->thread_id);
2602 __instr_mov_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2604 const struct instruction *ip)
2606 TRACE(
"[Thread %2u] mov (hh)\n", p->thread_id);
2612 __instr_mov_dma_exec(
struct rte_swx_pipeline *p
__rte_unused,
2614 const struct instruction *ip)
2616 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2617 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
/* Field sizes in bytes (n_bits / 8). */
2619 uint32_t n_dst = ip->mov.dst.n_bits >> 3;
2620 uint32_t n_src = ip->mov.src.n_bits >> 3;
2622 TRACE(
"[Thread %2u] mov (dma) %u bytes\n", p->thread_id, n);
/* Destination wider than source: zero-fill the leading bytes. */
2625 if (n_dst > n_src) {
2626 uint32_t n_dst_zero = n_dst - n_src;
2629 memset(dst, 0, n_dst_zero);
2633 memcpy(dst, src, n_src);
/* Source wider than destination: drop the leading source bytes. */
2635 uint32_t n_src_skipped = n_src - n_dst;
2638 src += n_src_skipped;
2639 memcpy(dst, src, n_dst);
2644 __instr_mov_128_exec(
struct rte_swx_pipeline *p
__rte_unused,
2646 const struct instruction *ip)
2648 uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2649 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2651 uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2652 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2654 TRACE(
"[Thread %2u] mov (128)\n", p->thread_id);
/* 16-byte copy as two 64-bit stores. */
2656 dst64_ptr[0] = src64_ptr[0];
2657 dst64_ptr[1] = src64_ptr[1];
2661 __instr_mov_128_32_exec(
struct rte_swx_pipeline *p
__rte_unused,
2663 const struct instruction *ip)
2665 uint8_t *dst = t->structs[ip->mov.dst.struct_id] + ip->mov.dst.offset;
2666 uint8_t *src = t->structs[ip->mov.src.struct_id] + ip->mov.src.offset;
2668 uint32_t *dst32 = (uint32_t *)dst;
2669 uint32_t *src32 = (uint32_t *)src;
2671 TRACE(
"[Thread %2u] mov (128 <- 32)\n", p->thread_id);
/* 32-bit source lands in the last word of the 128-bit destination. */
2676 dst32[3] = src32[0];
2680 __instr_mov_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2682 const struct instruction *ip)
2684 TRACE(
"[Thread %2u] mov m.f %" PRIx64
"\n", p->thread_id, ip->mov.src_val);
/*
 * dma h.s t.f: copy action data (table entry fields, t->structs[0]) into one
 * or more headers, marking each destination header valid. When the header is
 * not currently valid the copy targets its backing buffer (h->ptr0 path).
 * The dma_ht{,2..8} wrappers below fan out to __instr_dma_ht_many_exec()
 * with the corresponding count and TRACE the fusing.
 * NOTE(review): partial extract — braces and the valid-branch destination
 * line (2713) are missing here.
 */
2693 __instr_dma_ht_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
2695 const struct instruction *ip,
/* Struct 0 holds the action data for the current table entry. */
2698 uint8_t *action_data = t->structs[0];
2699 uint64_t valid_headers = t->valid_headers;
2702 for (i = 0; i < n_dma; i++) {
2703 uint32_t header_id = ip->dma.dst.header_id[i];
2704 uint32_t struct_id = ip->dma.dst.struct_id[i];
2705 uint32_t offset = ip->dma.src.offset[i];
2706 uint32_t n_bytes = ip->dma.n_bytes[i];
2708 struct header_runtime *h = &t->headers[header_id];
2709 uint8_t *h_ptr0 = h->ptr0;
2710 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write its backing buffer. */
2712 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2714 void *src = &action_data[offset];
2716 TRACE(
"[Thread %2u] dma h.s t.f\n", p->thread_id);
2719 memcpy(dst, src, n_bytes);
2720 t->structs[struct_id] = dst;
2721 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2724 t->valid_headers = valid_headers;
2728 __instr_dma_ht_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2730 __instr_dma_ht_many_exec(p, t, ip, 1);
2734 __instr_dma_ht2_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2736 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2738 __instr_dma_ht_many_exec(p, t, ip, 2);
2742 __instr_dma_ht3_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2744 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2746 __instr_dma_ht_many_exec(p, t, ip, 3);
2750 __instr_dma_ht4_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2752 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2754 __instr_dma_ht_many_exec(p, t, ip, 4);
2758 __instr_dma_ht5_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2760 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2762 __instr_dma_ht_many_exec(p, t, ip, 5);
2766 __instr_dma_ht6_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2768 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2770 __instr_dma_ht_many_exec(p, t, ip, 6);
2774 __instr_dma_ht7_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2776 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2778 __instr_dma_ht_many_exec(p, t, ip, 7);
2782 __instr_dma_ht8_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2784 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2786 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU instruction family: add / sub / shl / shr / and / or / xor, each with
 * operand-location variants (no suffix = metadata/metadata, mh/hm/hh mix
 * metadata and header operands, mi/hi/i take an immediate). The visible code
 * is only the TRACE call of each variant plus, for a few and/or/xor
 * variants, the ALU_HM_FAST/ALU_HH_FAST macro invocation that performs the
 * operation; the remaining ALU(...) macro lines are not visible in this
 * partial extract.
 */
2793 __instr_alu_add_exec(
struct rte_swx_pipeline *p
__rte_unused,
2795 const struct instruction *ip)
2797 TRACE(
"[Thread %2u] add\n", p->thread_id);
2803 __instr_alu_add_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2805 const struct instruction *ip)
2807 TRACE(
"[Thread %2u] add (mh)\n", p->thread_id);
2813 __instr_alu_add_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2815 const struct instruction *ip)
2817 TRACE(
"[Thread %2u] add (hm)\n", p->thread_id);
2823 __instr_alu_add_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2825 const struct instruction *ip)
2827 TRACE(
"[Thread %2u] add (hh)\n", p->thread_id);
2833 __instr_alu_add_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2835 const struct instruction *ip)
2837 TRACE(
"[Thread %2u] add (mi)\n", p->thread_id);
2843 __instr_alu_add_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2845 const struct instruction *ip)
2847 TRACE(
"[Thread %2u] add (hi)\n", p->thread_id);
2853 __instr_alu_sub_exec(
struct rte_swx_pipeline *p
__rte_unused,
2855 const struct instruction *ip)
2857 TRACE(
"[Thread %2u] sub\n", p->thread_id);
2863 __instr_alu_sub_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2865 const struct instruction *ip)
2867 TRACE(
"[Thread %2u] sub (mh)\n", p->thread_id);
2873 __instr_alu_sub_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2875 const struct instruction *ip)
2877 TRACE(
"[Thread %2u] sub (hm)\n", p->thread_id);
2883 __instr_alu_sub_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2885 const struct instruction *ip)
2887 TRACE(
"[Thread %2u] sub (hh)\n", p->thread_id);
2893 __instr_alu_sub_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2895 const struct instruction *ip)
2897 TRACE(
"[Thread %2u] sub (mi)\n", p->thread_id);
2903 __instr_alu_sub_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2905 const struct instruction *ip)
2907 TRACE(
"[Thread %2u] sub (hi)\n", p->thread_id);
2913 __instr_alu_shl_exec(
struct rte_swx_pipeline *p
__rte_unused,
2915 const struct instruction *ip)
2917 TRACE(
"[Thread %2u] shl\n", p->thread_id);
2923 __instr_alu_shl_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2925 const struct instruction *ip)
2927 TRACE(
"[Thread %2u] shl (mh)\n", p->thread_id);
2933 __instr_alu_shl_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2935 const struct instruction *ip)
2937 TRACE(
"[Thread %2u] shl (hm)\n", p->thread_id);
2943 __instr_alu_shl_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2945 const struct instruction *ip)
2947 TRACE(
"[Thread %2u] shl (hh)\n", p->thread_id);
2953 __instr_alu_shl_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2955 const struct instruction *ip)
2957 TRACE(
"[Thread %2u] shl (mi)\n", p->thread_id);
2963 __instr_alu_shl_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2965 const struct instruction *ip)
2967 TRACE(
"[Thread %2u] shl (hi)\n", p->thread_id);
2973 __instr_alu_shr_exec(
struct rte_swx_pipeline *p
__rte_unused,
2975 const struct instruction *ip)
2977 TRACE(
"[Thread %2u] shr\n", p->thread_id);
2983 __instr_alu_shr_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2985 const struct instruction *ip)
2987 TRACE(
"[Thread %2u] shr (mh)\n", p->thread_id);
2993 __instr_alu_shr_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2995 const struct instruction *ip)
2997 TRACE(
"[Thread %2u] shr (hm)\n", p->thread_id);
3003 __instr_alu_shr_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3005 const struct instruction *ip)
3007 TRACE(
"[Thread %2u] shr (hh)\n", p->thread_id);
3013 __instr_alu_shr_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
3015 const struct instruction *ip)
3017 TRACE(
"[Thread %2u] shr (mi)\n", p->thread_id);
3024 __instr_alu_shr_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
3026 const struct instruction *ip)
3028 TRACE(
"[Thread %2u] shr (hi)\n", p->thread_id);
3034 __instr_alu_and_exec(
struct rte_swx_pipeline *p
__rte_unused,
3036 const struct instruction *ip)
3038 TRACE(
"[Thread %2u] and\n", p->thread_id);
3044 __instr_alu_and_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3046 const struct instruction *ip)
3048 TRACE(
"[Thread %2u] and (mh)\n", p->thread_id);
3054 __instr_alu_and_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
3056 const struct instruction *ip)
3058 TRACE(
"[Thread %2u] and (hm)\n", p->thread_id);
/* Bitwise ops are endianness-insensitive: the fast macro applies. */
3060 ALU_HM_FAST(t, ip, &);
3064 __instr_alu_and_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3066 const struct instruction *ip)
3068 TRACE(
"[Thread %2u] and (hh)\n", p->thread_id);
3070 ALU_HH_FAST(t, ip, &);
3074 __instr_alu_and_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
3076 const struct instruction *ip)
3078 TRACE(
"[Thread %2u] and (i)\n", p->thread_id);
3084 __instr_alu_or_exec(
struct rte_swx_pipeline *p
__rte_unused,
3086 const struct instruction *ip)
3088 TRACE(
"[Thread %2u] or\n", p->thread_id);
3094 __instr_alu_or_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3096 const struct instruction *ip)
3098 TRACE(
"[Thread %2u] or (mh)\n", p->thread_id);
3104 __instr_alu_or_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
3106 const struct instruction *ip)
3108 TRACE(
"[Thread %2u] or (hm)\n", p->thread_id);
3110 ALU_HM_FAST(t, ip, |);
3114 __instr_alu_or_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3116 const struct instruction *ip)
3118 TRACE(
"[Thread %2u] or (hh)\n", p->thread_id);
3120 ALU_HH_FAST(t, ip, |);
3124 __instr_alu_or_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
3126 const struct instruction *ip)
3128 TRACE(
"[Thread %2u] or (i)\n", p->thread_id);
3134 __instr_alu_xor_exec(
struct rte_swx_pipeline *p
__rte_unused,
3136 const struct instruction *ip)
3138 TRACE(
"[Thread %2u] xor\n", p->thread_id);
3144 __instr_alu_xor_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3146 const struct instruction *ip)
3148 TRACE(
"[Thread %2u] xor (mh)\n", p->thread_id);
3154 __instr_alu_xor_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
3156 const struct instruction *ip)
3158 TRACE(
"[Thread %2u] xor (hm)\n", p->thread_id);
3160 ALU_HM_FAST(t, ip, ^);
3164 __instr_alu_xor_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3166 const struct instruction *ip)
3168 TRACE(
"[Thread %2u] xor (hh)\n", p->thread_id);
3170 ALU_HH_FAST(t, ip, ^);
3174 __instr_alu_xor_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
3176 const struct instruction *ip)
3178 TRACE(
"[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Internet-checksum instructions (RFC 1071 style one's-complement sums).
 * ckadd/cksub (field): incrementally add/subtract a masked source field
 * into/from a 16-bit checksum field, folding carries with the repeated
 * r = (r & 0xFFFF) + (r >> 16) step. ckadd (struct of 20 bytes) is the
 * fast path for a standard IPv4 header (5 x 32-bit words); ckadd (struct)
 * handles arbitrary multiples of 4 bytes, where the instruction encodes the
 * source HEADER ID in alu.src.n_bits and the byte count comes from the
 * header's runtime size. NOTE(review): partial extract — braces, the src64
 * loads, the checksum-uncomplement steps and final stores' neighbors are
 * missing here.
 */
3184 __instr_alu_ckadd_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
3186 const struct instruction *ip)
3188 uint8_t *dst_struct, *src_struct;
3189 uint16_t *dst16_ptr, dst;
3190 uint64_t *src64_ptr, src64, src64_mask, src;
3193 TRACE(
"[Thread %2u] ckadd (field)\n", p->thread_id);
/* Destination: the 16-bit checksum field. */
3196 dst_struct = t->structs[ip->alu.dst.struct_id];
3197 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
/* Source: up to 64-bit field, masked to its declared bit width. */
3200 src_struct = t->structs[ip->alu.src.struct_id];
3201 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3203 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3204 src = src64 & src64_mask;
/* Fold the 64-bit source into the running 1's-complement sum. */
3214 r += (src >> 32) + (src & 0xFFFFFFFF);
3220 r = (r & 0xFFFF) + (r >> 16);
3225 r = (r & 0xFFFF) + (r >> 16);
3232 r = (r & 0xFFFF) + (r >> 16);
3238 *dst16_ptr = (uint16_t)r;
3242 __instr_alu_cksub_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
3244 const struct instruction *ip)
3246 uint8_t *dst_struct, *src_struct;
3247 uint16_t *dst16_ptr, dst;
3248 uint64_t *src64_ptr, src64, src64_mask, src;
3251 TRACE(
"[Thread %2u] cksub (field)\n", p->thread_id);
3254 dst_struct = t->structs[ip->alu.dst.struct_id];
3255 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3258 src_struct = t->structs[ip->alu.src.struct_id];
3259 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3261 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3262 src = src64 & src64_mask;
/* Bias keeps the subtraction non-negative before folding. */
3280 r += 0xFFFF00000ULL;
3285 r -= (src >> 32) + (src & 0xFFFFFFFF);
3290 r = (r & 0xFFFF) + (r >> 16);
3295 r = (r & 0xFFFF) + (r >> 16);
3302 r = (r & 0xFFFF) + (r >> 16);
3308 *dst16_ptr = (uint16_t)r;
3312 __instr_alu_ckadd_struct20_exec(
struct rte_swx_pipeline *p
__rte_unused,
3314 const struct instruction *ip)
3316 uint8_t *dst_struct, *src_struct;
3317 uint16_t *dst16_ptr, dst;
3318 uint32_t *src32_ptr;
3321 TRACE(
"[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3324 dst_struct = t->structs[ip->alu.dst.struct_id];
3325 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3328 src_struct = t->structs[ip->alu.src.struct_id];
3329 src32_ptr = (uint32_t *)&src_struct[0];
/* Combine the two partial sums plus the fifth 32-bit word. */
3339 r0 += r1 + src32_ptr[4];
3344 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3349 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3356 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A zero checksum is transmitted as 0xFFFF per RFC 768/1071 convention. */
3360 r0 = r0 ? r0 : 0xFFFF;
3362 *dst16_ptr = (uint16_t)r0;
3366 __instr_alu_ckadd_struct_exec(
struct rte_swx_pipeline *p
__rte_unused,
3368 const struct instruction *ip)
/* The instruction re-purposes alu.src.n_bits to carry the header ID. */
3370 uint32_t src_header_id = ip->alu.src.n_bits;
3371 uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3372 uint8_t *dst_struct, *src_struct;
3373 uint16_t *dst16_ptr, dst;
3374 uint32_t *src32_ptr;
/* 20-byte headers (plain IPv4) take the specialized fast path. */
3378 if (n_src_header_bytes == 20) {
3379 __instr_alu_ckadd_struct20_exec(p, t, ip);
3383 TRACE(
"[Thread %2u] ckadd (struct)\n", p->thread_id);
3386 dst_struct = t->structs[ip->alu.dst.struct_id];
3387 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3390 src_struct = t->structs[ip->alu.src.struct_id];
3391 src32_ptr = (uint32_t *)&src_struct[0];
/* Accumulate the header as 32-bit words. */
3401 for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
3407 r = (r & 0xFFFF) + (r >> 16);
3412 r = (r & 0xFFFF) + (r >> 16);
3419 r = (r & 0xFFFF) + (r >> 16);
3425 *dst16_ptr = (uint16_t)r;
/*
 * Register-array addressing helpers. instr_regarray_regarray() resolves the
 * array base; instr_regarray_idx_{hbo,nbo,imm}() compute the element index
 * from a host-byte-order field, network-byte-order field, or an immediate,
 * always clamped with r->size_mask. instr_regarray_src_{hbo,nbo}() read the
 * source operand; instr_regarray_dst_*_set() write the destination field
 * with a read-modify-write under the field's bit mask. On big-endian builds
 * the nbo variants are #define'd to the hbo ones since no byte swap is
 * needed. NOTE(review): partial extract — braces, return statements and the
 * #else lines of the #if blocks are missing here.
 */
3431 static inline uint64_t *
3432 instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3434 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3438 static inline uint64_t
3439 instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3441 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3443 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3444 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3445 uint64_t idx64 = *idx64_ptr;
/* Mask to the field's declared width, then clamp to the array size. */
3446 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3447 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3452 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3454 static inline uint64_t
3455 instr_regarray_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3457 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3459 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3460 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3461 uint64_t idx64 = *idx64_ptr;
/* Byte-swap, right-align to the field width, clamp to the array size. */
3462 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;
/* Big endian: header fields are already in host order. */
3469 #define instr_regarray_idx_nbo instr_regarray_idx_hbo
3473 static inline uint64_t
3474 instr_regarray_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3476 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3478 uint64_t idx = ip->regarray.idx_val & r->size_mask;
3483 static inline uint64_t
3484 instr_regarray_src_hbo(
struct thread *t,
const struct instruction *ip)
3486 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3487 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3488 uint64_t src64 = *src64_ptr;
3489 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3490 uint64_t src = src64 & src64_mask;
3495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3497 static inline uint64_t
3498 instr_regarray_src_nbo(
struct thread *t,
const struct instruction *ip)
3500 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3501 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3502 uint64_t src64 = *src64_ptr;
3503 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);
3510 #define instr_regarray_src_nbo instr_regarray_src_hbo
3515 instr_regarray_dst_hbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3517 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3518 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3519 uint64_t dst64 = *dst64_ptr;
3520 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
/* Read-modify-write: only the field's bits are replaced. */
3522 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3526 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3529 instr_regarray_dst_nbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3531 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3532 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3533 uint64_t dst64 = *dst64_ptr;
3534 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
/* Convert to network order and left-align before the masked store. */
3536 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3537 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
3542 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set
/*
 * Register-array instruction executors. Suffix convention: r = regarray,
 * h = header field (network byte order), m = metadata field (host byte
 * order), i = immediate. regprefetch resolves the element address (and,
 * presumably, prefetches it — the prefetch line itself is not visible in
 * this extract); regrd reads regarray[idx] into a header/metadata field via
 * the dst_*_set helpers; regwr stores a header/metadata/immediate source
 * into regarray[idx].
 * NOTE(review): partial extract — braces and some signature lines
 * ("struct thread *t") are missing here.
 */
3547 __instr_regprefetch_rh_exec(
struct rte_swx_pipeline *p,
3549 const struct instruction *ip)
3551 uint64_t *regarray, idx;
3553 TRACE(
"[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3555 regarray = instr_regarray_regarray(p, ip);
3556 idx = instr_regarray_idx_nbo(p, t, ip);
3561 __instr_regprefetch_rm_exec(
struct rte_swx_pipeline *p,
3563 const struct instruction *ip)
3565 uint64_t *regarray, idx;
3567 TRACE(
"[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3569 regarray = instr_regarray_regarray(p, ip);
3570 idx = instr_regarray_idx_hbo(p, t, ip);
3575 __instr_regprefetch_ri_exec(
struct rte_swx_pipeline *p,
3577 const struct instruction *ip)
3579 uint64_t *regarray, idx;
3581 TRACE(
"[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3583 regarray = instr_regarray_regarray(p, ip);
3584 idx = instr_regarray_idx_imm(p, ip);
3589 __instr_regrd_hrh_exec(
struct rte_swx_pipeline *p,
3591 const struct instruction *ip)
3593 uint64_t *regarray, idx;
3595 TRACE(
"[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3597 regarray = instr_regarray_regarray(p, ip);
3598 idx = instr_regarray_idx_nbo(p, t, ip);
3599 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3603 __instr_regrd_hrm_exec(
struct rte_swx_pipeline *p,
3605 const struct instruction *ip)
3607 uint64_t *regarray, idx;
3609 TRACE(
"[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3612 regarray = instr_regarray_regarray(p, ip);
3613 idx = instr_regarray_idx_hbo(p, t, ip);
3614 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3618 __instr_regrd_mrh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3620 uint64_t *regarray, idx;
3622 TRACE(
"[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3624 regarray = instr_regarray_regarray(p, ip);
3625 idx = instr_regarray_idx_nbo(p, t, ip);
3626 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3630 __instr_regrd_mrm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3632 uint64_t *regarray, idx;
3634 TRACE(
"[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3636 regarray = instr_regarray_regarray(p, ip);
3637 idx = instr_regarray_idx_hbo(p, t, ip);
3638 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3642 __instr_regrd_hri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3644 uint64_t *regarray, idx;
3646 TRACE(
"[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3648 regarray = instr_regarray_regarray(p, ip);
3649 idx = instr_regarray_idx_imm(p, ip);
3650 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3654 __instr_regrd_mri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3656 uint64_t *regarray, idx;
3658 TRACE(
"[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3660 regarray = instr_regarray_regarray(p, ip);
3661 idx = instr_regarray_idx_imm(p, ip);
3662 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3666 __instr_regwr_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3668 uint64_t *regarray, idx, src;
3670 TRACE(
"[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3672 regarray = instr_regarray_regarray(p, ip);
3673 idx = instr_regarray_idx_nbo(p, t, ip);
3674 src = instr_regarray_src_nbo(t, ip);
3675 regarray[idx] = src;
3679 __instr_regwr_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3681 uint64_t *regarray, idx, src;
3683 TRACE(
"[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3685 regarray = instr_regarray_regarray(p, ip);
3686 idx = instr_regarray_idx_nbo(p, t, ip);
3687 src = instr_regarray_src_hbo(t, ip);
3688 regarray[idx] = src;
3692 __instr_regwr_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3694 uint64_t *regarray, idx, src;
3696 TRACE(
"[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3698 regarray = instr_regarray_regarray(p, ip);
3699 idx = instr_regarray_idx_hbo(p, t, ip);
3700 src = instr_regarray_src_nbo(t, ip);
3701 regarray[idx] = src;
3705 __instr_regwr_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3707 uint64_t *regarray, idx, src;
3709 TRACE(
"[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3711 regarray = instr_regarray_regarray(p, ip);
3712 idx = instr_regarray_idx_hbo(p, t, ip);
3713 src = instr_regarray_src_hbo(t, ip);
3714 regarray[idx] = src;
3718 __instr_regwr_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3720 uint64_t *regarray, idx, src;
3722 TRACE(
"[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3724 regarray = instr_regarray_regarray(p, ip);
3725 idx = instr_regarray_idx_nbo(p, t, ip);
3726 src = ip->regarray.dstsrc_val;
3727 regarray[idx] = src;
3731 __instr_regwr_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3733 uint64_t *regarray, idx, src;
3735 TRACE(
"[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3737 regarray = instr_regarray_regarray(p, ip);
3738 idx = instr_regarray_idx_hbo(p, t, ip);
3739 src = ip->regarray.dstsrc_val;
3740 regarray[idx] = src;
3744 __instr_regwr_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3746 uint64_t *regarray, idx, src;
3748 TRACE(
"[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3750 regarray = instr_regarray_regarray(p, ip);
3751 idx = instr_regarray_idx_imm(p, ip);
3752 src = instr_regarray_src_nbo(t, ip);
3753 regarray[idx] = src;
3757 __instr_regwr_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3759 uint64_t *regarray, idx, src;
3761 TRACE(
"[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3763 regarray = instr_regarray_regarray(p, ip);
3764 idx = instr_regarray_idx_imm(p, ip);
3765 src = instr_regarray_src_hbo(t, ip);
3766 regarray[idx] = src;
3770 __instr_regwr_rii_exec(
struct rte_swx_pipeline *p,
3772 const struct instruction *ip)
3774 uint64_t *regarray, idx, src;
3776 TRACE(
"[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3778 regarray = instr_regarray_regarray(p, ip);
3779 idx = instr_regarray_idx_imm(p, ip);
3780 src = ip->regarray.dstsrc_val;
3781 regarray[idx] = src;
3785 __instr_regadd_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3787 uint64_t *regarray, idx, src;
3789 TRACE(
"[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3791 regarray = instr_regarray_regarray(p, ip);
3792 idx = instr_regarray_idx_nbo(p, t, ip);
3793 src = instr_regarray_src_nbo(t, ip);
3794 regarray[idx] += src;
3798 __instr_regadd_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3800 uint64_t *regarray, idx, src;
3802 TRACE(
"[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3804 regarray = instr_regarray_regarray(p, ip);
3805 idx = instr_regarray_idx_nbo(p, t, ip);
3806 src = instr_regarray_src_hbo(t, ip);
3807 regarray[idx] += src;
3811 __instr_regadd_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3813 uint64_t *regarray, idx, src;
3815 TRACE(
"[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3817 regarray = instr_regarray_regarray(p, ip);
3818 idx = instr_regarray_idx_hbo(p, t, ip);
3819 src = instr_regarray_src_nbo(t, ip);
3820 regarray[idx] += src;
3824 __instr_regadd_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3826 uint64_t *regarray, idx, src;
3828 TRACE(
"[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3830 regarray = instr_regarray_regarray(p, ip);
3831 idx = instr_regarray_idx_hbo(p, t, ip);
3832 src = instr_regarray_src_hbo(t, ip);
3833 regarray[idx] += src;
3837 __instr_regadd_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3839 uint64_t *regarray, idx, src;
3841 TRACE(
"[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3843 regarray = instr_regarray_regarray(p, ip);
3844 idx = instr_regarray_idx_nbo(p, t, ip);
3845 src = ip->regarray.dstsrc_val;
3846 regarray[idx] += src;
3850 __instr_regadd_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3852 uint64_t *regarray, idx, src;
3854 TRACE(
"[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3856 regarray = instr_regarray_regarray(p, ip);
3857 idx = instr_regarray_idx_hbo(p, t, ip);
3858 src = ip->regarray.dstsrc_val;
3859 regarray[idx] += src;
3863 __instr_regadd_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3865 uint64_t *regarray, idx, src;
3867 TRACE(
"[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3869 regarray = instr_regarray_regarray(p, ip);
3870 idx = instr_regarray_idx_imm(p, ip);
3871 src = instr_regarray_src_nbo(t, ip);
3872 regarray[idx] += src;
3876 __instr_regadd_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3878 uint64_t *regarray, idx, src;
3880 TRACE(
"[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3882 regarray = instr_regarray_regarray(p, ip);
3883 idx = instr_regarray_idx_imm(p, ip);
3884 src = instr_regarray_src_hbo(t, ip);
3885 regarray[idx] += src;
3889 __instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
3891 const struct instruction *ip)
3893 uint64_t *regarray, idx, src;
3895 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3897 regarray = instr_regarray_regarray(p, ip);
3898 idx = instr_regarray_idx_imm(p, ip);
3899 src = ip->regarray.dstsrc_val;
3900 regarray[idx] += src;
3906 static inline struct meter *
3907 instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3909 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3911 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3912 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3913 uint64_t idx64 = *idx64_ptr;
3914 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3915 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3917 return &r->metarray[idx];
3920 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3922 static inline struct meter *
3923 instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3925 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3927 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3928 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3929 uint64_t idx64 = *idx64_ptr;
3930 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3932 return &r->metarray[idx];
3937 #define instr_meter_idx_nbo instr_meter_idx_hbo
3941 static inline struct meter *
3942 instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3944 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3946 uint64_t idx = ip->meter.idx_val & r->size_mask;
3948 return &r->metarray[idx];
3951 static inline uint32_t
3952 instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
3954 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3955 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3956 uint64_t src64 = *src64_ptr;
3957 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3958 uint64_t src = src64 & src64_mask;
3960 return (uint32_t)src;
3963 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3965 static inline uint32_t
3966 instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
3968 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3969 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3970 uint64_t src64 = *src64_ptr;
3971 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3973 return (uint32_t)src;
3978 #define instr_meter_length_nbo instr_meter_length_hbo
3983 instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
3985 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3986 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3987 uint64_t src64 = *src64_ptr;
3988 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3989 uint64_t src = src64 & src64_mask;
3995 instr_meter_color_out_hbo_set(
struct thread *t,
3996 const struct instruction *ip,
3999 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
4000 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
4001 uint64_t dst64 = *dst64_ptr;
4002 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
4004 uint64_t src = (uint64_t)color_out;
4006 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
4010 __instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
4012 const struct instruction *ip)
4016 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
4018 m = instr_meter_idx_nbo(p, t, ip);
4023 __instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
4025 const struct instruction *ip)
4029 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
4031 m = instr_meter_idx_hbo(p, t, ip);
4036 __instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
4038 const struct instruction *ip)
4042 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
4044 m = instr_meter_idx_imm(p, ip);
4049 __instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4052 uint64_t time, n_pkts, n_bytes;
4056 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
4058 m = instr_meter_idx_nbo(p, t, ip);
4061 length = instr_meter_length_nbo(t, ip);
4062 color_in = instr_meter_color_in_hbo(t, ip);
4065 &m->profile->profile,
4070 color_out &= m->color_mask;
4072 n_pkts = m->n_pkts[color_out];
4073 n_bytes = m->n_bytes[color_out];
4075 instr_meter_color_out_hbo_set(t, ip, color_out);
4077 m->n_pkts[color_out] = n_pkts + 1;
4078 m->n_bytes[color_out] = n_bytes + length;
4082 __instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4085 uint64_t time, n_pkts, n_bytes;
4089 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
4091 m = instr_meter_idx_nbo(p, t, ip);
4094 length = instr_meter_length_nbo(t, ip);
4095 color_in = (
enum rte_color)ip->meter.color_in_val;
4098 &m->profile->profile,
4103 color_out &= m->color_mask;
4105 n_pkts = m->n_pkts[color_out];
4106 n_bytes = m->n_bytes[color_out];
4108 instr_meter_color_out_hbo_set(t, ip, color_out);
4110 m->n_pkts[color_out] = n_pkts + 1;
4111 m->n_bytes[color_out] = n_bytes + length;
4115 __instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4118 uint64_t time, n_pkts, n_bytes;
4122 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
4124 m = instr_meter_idx_nbo(p, t, ip);
4127 length = instr_meter_length_hbo(t, ip);
4128 color_in = instr_meter_color_in_hbo(t, ip);
4131 &m->profile->profile,
4136 color_out &= m->color_mask;
4138 n_pkts = m->n_pkts[color_out];
4139 n_bytes = m->n_bytes[color_out];
4141 instr_meter_color_out_hbo_set(t, ip, color_out);
4143 m->n_pkts[color_out] = n_pkts + 1;
4144 m->n_bytes[color_out] = n_bytes + length;
4148 __instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4151 uint64_t time, n_pkts, n_bytes;
4155 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
4157 m = instr_meter_idx_nbo(p, t, ip);
4160 length = instr_meter_length_hbo(t, ip);
4161 color_in = (
enum rte_color)ip->meter.color_in_val;
4164 &m->profile->profile,
4169 color_out &= m->color_mask;
4171 n_pkts = m->n_pkts[color_out];
4172 n_bytes = m->n_bytes[color_out];
4174 instr_meter_color_out_hbo_set(t, ip, color_out);
4176 m->n_pkts[color_out] = n_pkts + 1;
4177 m->n_bytes[color_out] = n_bytes + length;
4181 __instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4184 uint64_t time, n_pkts, n_bytes;
4188 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
4190 m = instr_meter_idx_hbo(p, t, ip);
4193 length = instr_meter_length_nbo(t, ip);
4194 color_in = instr_meter_color_in_hbo(t, ip);
4197 &m->profile->profile,
4202 color_out &= m->color_mask;
4204 n_pkts = m->n_pkts[color_out];
4205 n_bytes = m->n_bytes[color_out];
4207 instr_meter_color_out_hbo_set(t, ip, color_out);
4209 m->n_pkts[color_out] = n_pkts + 1;
4210 m->n_bytes[color_out] = n_bytes + length;
4214 __instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4217 uint64_t time, n_pkts, n_bytes;
4221 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
4223 m = instr_meter_idx_hbo(p, t, ip);
4226 length = instr_meter_length_nbo(t, ip);
4227 color_in = (
enum rte_color)ip->meter.color_in_val;
4230 &m->profile->profile,
4235 color_out &= m->color_mask;
4237 n_pkts = m->n_pkts[color_out];
4238 n_bytes = m->n_bytes[color_out];
4240 instr_meter_color_out_hbo_set(t, ip, color_out);
4242 m->n_pkts[color_out] = n_pkts + 1;
4243 m->n_bytes[color_out] = n_bytes + length;
4247 __instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4250 uint64_t time, n_pkts, n_bytes;
4254 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
4256 m = instr_meter_idx_hbo(p, t, ip);
4259 length = instr_meter_length_hbo(t, ip);
4260 color_in = instr_meter_color_in_hbo(t, ip);
4263 &m->profile->profile,
4268 color_out &= m->color_mask;
4270 n_pkts = m->n_pkts[color_out];
4271 n_bytes = m->n_bytes[color_out];
4273 instr_meter_color_out_hbo_set(t, ip, color_out);
4275 m->n_pkts[color_out] = n_pkts + 1;
4276 m->n_bytes[color_out] = n_bytes + length;
4280 __instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4283 uint64_t time, n_pkts, n_bytes;
4287 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
4289 m = instr_meter_idx_hbo(p, t, ip);
4292 length = instr_meter_length_hbo(t, ip);
4293 color_in = (
enum rte_color)ip->meter.color_in_val;
4296 &m->profile->profile,
4301 color_out &= m->color_mask;
4303 n_pkts = m->n_pkts[color_out];
4304 n_bytes = m->n_bytes[color_out];
4306 instr_meter_color_out_hbo_set(t, ip, color_out);
4308 m->n_pkts[color_out] = n_pkts + 1;
4309 m->n_bytes[color_out] = n_bytes + length;
4313 __instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4316 uint64_t time, n_pkts, n_bytes;
4320 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
4322 m = instr_meter_idx_imm(p, ip);
4325 length = instr_meter_length_nbo(t, ip);
4326 color_in = instr_meter_color_in_hbo(t, ip);
4329 &m->profile->profile,
4334 color_out &= m->color_mask;
4336 n_pkts = m->n_pkts[color_out];
4337 n_bytes = m->n_bytes[color_out];
4339 instr_meter_color_out_hbo_set(t, ip, color_out);
4341 m->n_pkts[color_out] = n_pkts + 1;
4342 m->n_bytes[color_out] = n_bytes + length;
4346 __instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4349 uint64_t time, n_pkts, n_bytes;
4353 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
4355 m = instr_meter_idx_imm(p, ip);
4358 length = instr_meter_length_nbo(t, ip);
4359 color_in = (
enum rte_color)ip->meter.color_in_val;
4362 &m->profile->profile,
4367 color_out &= m->color_mask;
4369 n_pkts = m->n_pkts[color_out];
4370 n_bytes = m->n_bytes[color_out];
4372 instr_meter_color_out_hbo_set(t, ip, color_out);
4374 m->n_pkts[color_out] = n_pkts + 1;
4375 m->n_bytes[color_out] = n_bytes + length;
4379 __instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4382 uint64_t time, n_pkts, n_bytes;
4386 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
4388 m = instr_meter_idx_imm(p, ip);
4391 length = instr_meter_length_hbo(t, ip);
4392 color_in = instr_meter_color_in_hbo(t, ip);
4395 &m->profile->profile,
4400 color_out &= m->color_mask;
4402 n_pkts = m->n_pkts[color_out];
4403 n_bytes = m->n_bytes[color_out];
4405 instr_meter_color_out_hbo_set(t, ip, color_out);
4407 m->n_pkts[color_out] = n_pkts + 1;
4408 m->n_bytes[color_out] = n_bytes + length;
4412 __instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4415 uint64_t time, n_pkts, n_bytes;
4419 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
4421 m = instr_meter_idx_imm(p, ip);
4424 length = instr_meter_length_hbo(t, ip);
4425 color_in = (
enum rte_color)ip->meter.color_in_val;
4428 &m->profile->profile,
4433 color_out &= m->color_mask;
4435 n_pkts = m->n_pkts[color_out];
4436 n_bytes = m->n_bytes[color_out];
4438 instr_meter_color_out_hbo_set(t, ip, color_out);
4440 m->n_pkts[color_out] = n_pkts + 1;
4441 m->n_bytes[color_out] = n_bytes + length;
static uint32_t rte_bsf32(uint32_t v)
static uint64_t rte_get_tsc_cycles(void)
static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
static void rte_prefetch0(const volatile void *p)
void(* rte_swx_extern_type_destructor_t)(void *object)
void *(* rte_swx_extern_type_constructor_t)(const char *args)
int(* rte_swx_extern_func_t)(void *mailbox)
int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
#define RTE_SWX_NAME_SIZE
int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_port_out_flush_t)(void *port)
void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, size_t *entry_id, int *hit)
__rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
__rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
__rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
__rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
#define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX