/*
 * SVE vector lookup over DPDK's two-level IPv4 LPM table: each address
 * is resolved through tbl24[] (indexed by its top 24 bits), and entries
 * flagged as extended redirect to a 256-entry tbl8[] group indexed by
 * the last address byte.
 */
__rte_internal
static void
__rte_lpm_lookup_vec(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *__rte_restrict next_hops, const uint32_t n)
{
	uint32_t i;
	uint64_t vl = svcntw();
	svuint32_t v_ip, v_idx, v_tbl24, v_tbl8;
	svuint32_t v_mask_xv, v_mask_v;
	svbool_t pg = svptrue_b32();
	svbool_t pv;
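
	/*
	 * Strategy: an unpredicated main loop processes full vectors of
	 * vl = svcntw() lanes; one whilelt-predicated pass then handles
	 * the remaining IPs.
	 */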
	/* Pre-set every next hop to 0; lookups that miss leave it 0. */
	for (i = 0; i < n; i++)
		next_hops[i] = 0;
	/*
	 * Main loop over full vectors; i + vl < n (rather than
	 * i < n - vl) avoids unsigned underflow when n < vl.
	 */
	for (i = 0; i + vl < n; i += vl) {
		v_ip = svld1(pg, &ips[i]);
		/* tbl24 index: top 24 bits of each IP */
		v_idx = svlsr_x(pg, v_ip, 8);
		/* Gather the tbl24[] entries of all lanes */
		v_tbl24 = svld1_gather_index(pg,
				(const uint32_t *)lpm->tbl24, v_idx);

		/* Mask with only the valid bit set */
		v_mask_v = svdup_u32_z(pg, RTE_LPM_LOOKUP_SUCCESS);
		/* Mask with the valid and valid_group bits set */
		v_mask_xv = svdup_u32_z(pg, RTE_LPM_VALID_EXT_ENTRY_BITMASK);
		/* Valid, non-extended entries hold the next hop directly */
		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_v);
		svst1(pv, &next_hops[i], v_tbl24);

		/* Extended entries need a second lookup in tbl8[] */
		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_xv);
		if (svptest_any(pg, pv)) {
			/* tbl8 group index from the low 24 entry bits */
			v_idx = svand_x(pv, v_tbl24,
					svdup_u32_z(pv, 0xffffff));
			v_idx = svmul_x(pv, v_idx,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
			/* plus the last IP byte as offset in the group */
			v_idx = svadd_x(pv, svand_x(pv, v_ip,
					svdup_u32_z(pv, 0xff)), v_idx);
			/* Gather tbl8[] entries for the extended lanes */
			v_tbl8 = svld1_gather_index(pv,
					(const uint32_t *)lpm->tbl8, v_idx);
			/* Store next hops of lanes with valid tbl8 entries */
			pv = svcmpeq(pv, svand_z(pv, v_tbl8, v_mask_v),
					v_mask_v);
			svst1(pv, &next_hops[i], v_tbl8);
		}
	}

	/* Predicated tail: the remaining (n - i) IPs, if any */
	pg = svwhilelt_b32(i, n);
	if (svptest_any(svptrue_b32(), pg)) {
		v_ip = svld1(pg, &ips[i]);
		/* tbl24 index: top 24 bits of each IP */
		v_idx = svlsr_x(pg, v_ip, 8);
		/* Gather the tbl24[] entries of all active lanes */
		v_tbl24 = svld1_gather_index(pg,
				(const uint32_t *)lpm->tbl24, v_idx);

		/* Mask with only the valid bit set */
		v_mask_v = svdup_u32_z(pg, RTE_LPM_LOOKUP_SUCCESS);
		/* Mask with the valid and valid_group bits set */
		v_mask_xv = svdup_u32_z(pg, RTE_LPM_VALID_EXT_ENTRY_BITMASK);
		/* Valid, non-extended entries hold the next hop directly */
		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_v);
		svst1(pv, &next_hops[i], v_tbl24);

		/* Extended entries need a second lookup in tbl8[] */
		pv = svcmpeq(pg, svand_z(pg, v_tbl24, v_mask_xv), v_mask_xv);
		if (svptest_any(pg, pv)) {
			/* tbl8 group index from the low 24 entry bits */
			v_idx = svand_x(pv, v_tbl24,
					svdup_u32_z(pv, 0xffffff));
			v_idx = svmul_x(pv, v_idx,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
			/* plus the last IP byte as offset in the group */
			v_idx = svadd_x(pv, svand_x(pv, v_ip,
					svdup_u32_z(pv, 0xff)), v_idx);
			/* Gather tbl8[] entries for the extended lanes */
			v_tbl8 = svld1_gather_index(pv,
					(const uint32_t *)lpm->tbl8, v_idx);
			/* Store next hops of lanes with valid tbl8 entries */
			pv = svcmpeq(pv, svand_z(pv, v_tbl8, v_mask_v),
					v_mask_v);
			svst1(pv, &next_hops[i], v_tbl8);
		}
	}
}

/* For reference, the entry-flag macros used above, from rte_lpm.h: */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
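
/*
 * Usage sketch, not part of the original listing: on SVE builds DPDK's
 * rte_lpm.h maps rte_lpm_lookup_bulk() onto __rte_lpm_lookup_vec(), so
 * one call resolves a whole burst and each result is tested against
 * RTE_LPM_LOOKUP_SUCCESS. The addresses below and the assumption that
 * "lpm" already holds matching routes are hypothetical.
 */
#include <stdio.h>
#include <rte_ip.h>
#include <rte_lpm.h>

#define BURST 4

static void
lookup_burst_example(const struct rte_lpm *lpm)
{
	/* Hypothetical destination addresses, host byte order */
	uint32_t ips[BURST] = {
		RTE_IPV4(192, 168, 1, 1),
		RTE_IPV4(10, 0, 0, 42),
		RTE_IPV4(172, 16, 5, 7),
		RTE_IPV4(8, 8, 8, 8),
	};
	uint32_t next_hops[BURST];
	uint32_t j;

	/* One vectorized pass over the burst; misses stay 0 */
	rte_lpm_lookup_bulk(lpm, ips, next_hops, BURST);

	for (j = 0; j < BURST; j++) {
		if (next_hops[j] & RTE_LPM_LOOKUP_SUCCESS)
			/* the low 24 bits of the entry hold the next hop */
			printf("ip[%u]: next hop %u\n", j,
					next_hops[j] & 0x00ffffff);
		else
			printf("ip[%u]: no route\n", j);
	}
}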