/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * any element into its alternative bucket.
 */
static inline int
rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i, status;
	unsigned try = 0;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Insert the new entry into the first empty slot
			 * of the primary bucket.
			 */
			for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
				/* Check if the slot is available */
				if (likely(prim_bkt->signatures[i].sig ==
						NULL_SIGNATURE)) {
					prim_bkt->signatures[i].current = sig;
					prim_bkt->signatures[i].alt = alt_hash;
					prim_bkt->key_idx[i] = new_idx;
					break;
				}
			}
			rte_xend();

			if (i != RTE_HASH_BUCKET_ENTRIES)
				return 0;

			break; /* committed, but the bucket was full */
		}

		/* Transaction aborted: pause briefly and retry */
		try++;
		rte_pause();
	}

	return -1;
}
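/*
 * Note (added for clarity): rte_xbegin()/rte_xend()/rte_xabort() are thin
 * wrappers over the x86 RTM instructions (xbegin/xend/xabort), declared in
 * DPDK's rte_rtm.h. rte_xbegin() returns RTE_XBEGIN_STARTED when the
 * transaction begins; on any data conflict or explicit abort, execution
 * resumes at the rte_xbegin() call site with an abort status instead,
 * which is why the loop above simply pauses and retries up to
 * RTE_HASH_TSX_MAX_RETRY times.
 */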
/* Shift entries along the cuckoo path found by the BFS search and insert
 * the new key into the slot freed at the head of the path. The whole move
 * is done inside one TSX transaction, so concurrent writers either observe
 * the complete shift or none of it.
 */
static inline int
rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
			struct queue_node *leaf, uint32_t leaf_slot,
			hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned try = 0;
	unsigned status;
	uint32_t prev_alt_bkt_idx;

	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			while (likely(curr_node->prev != NULL)) {
				prev_node = curr_node->prev;
				prev_bkt = prev_node->bkt;
				prev_slot = curr_node->prev_slot;

				prev_alt_bkt_idx
					= prev_bkt->signatures[prev_slot].alt
					    & h->bucket_bitmask;

				/* Abort if a concurrent writer has already
				 * invalidated this cuckoo path.
				 */
				if (unlikely(&h->buckets[prev_alt_bkt_idx]
						!= curr_bkt))
					rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);

				/* Swap the current/alt signatures so a later
				 * cuckoo insert can move the entry back to
				 * its primary bucket if a slot frees up.
				 */
				curr_bkt->signatures[curr_slot].alt =
					prev_bkt->signatures[prev_slot].current;
				curr_bkt->signatures[curr_slot].current =
					prev_bkt->signatures[prev_slot].alt;
				curr_bkt->key_idx[curr_slot]
					= prev_bkt->key_idx[prev_slot];

				curr_slot = prev_slot;
				curr_node = prev_node;
				curr_bkt = curr_node->bkt;
			}

			/* The slot at the head of the path is now free */
			curr_bkt->signatures[curr_slot].current = sig;
			curr_bkt->signatures[curr_slot].alt = alt_hash;
			curr_bkt->key_idx[curr_slot] = new_idx;

			rte_xend();

			return 0;
		}

		/* Aborted: the path is most likely stale, since TSX detected
		 * a data conflict. Pause and retry.
		 */
		try++;
		rte_pause();
	}

	return -1;
}
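/*
 * For reference (added): the BFS path node used above is defined in
 * rte_cuckoo_hash.h, roughly as sketched below; consult that header for
 * the authoritative definition.
 *
 *	struct queue_node {
 *		struct rte_hash_bucket *bkt; // current bucket in the BFS
 *		struct queue_node *prev;     // parent bucket in the path
 *		int prev_slot;               // parent slot in the path
 *	};
 */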
/* Make space for the new key by running a BFS cuckoo search from the
 * target bucket, then shifting entries along the discovered path.
 */
static inline int
rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
			struct rte_hash_bucket *bkt, hash_sig_t sig,
			hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo BFS search: stop when the queue cannot hold another full
	 * bucket's worth of nodes.
	 */
	while (likely(tail != head && head <
			queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
			RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->signatures[i].sig == NULL_SIGNATURE) {
				/* Free slot found: shift entries along the
				 * path ending here and insert the new key.
				 */
				if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
						tail, i, sig,
						alt_hash, new_idx) == 0))
					return 0;
			}

			/* Enqueue this slot's alternative bucket, keeping a
			 * back pointer so the path can be replayed later.
			 */
			alt_bkt = &(h->buckets[curr_bkt->signatures[i].alt
						& h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
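/*
 * Added sketch (not part of the original header): how the two entry points
 * above compose on the multi-writer add path. The helper name is
 * hypothetical, and the primary-bucket computation mirrors how the generic
 * add-key path derives the bucket from the signature; treat this as an
 * illustration, not the library API.
 */
static inline int
example_add_with_tm(const struct rte_hash *h, hash_sig_t sig,
		hash_sig_t alt_hash, uint32_t new_idx)
{
	struct rte_hash_bucket *prim_bkt =
			&h->buckets[sig & h->bucket_bitmask];

	/* Fast path: claim a free slot in the primary bucket within
	 * a single TSX transaction.
	 */
	if (rte_hash_cuckoo_insert_mw_tm(prim_bkt, sig, alt_hash,
			new_idx) == 0)
		return 0;

	/* Slow path: BFS for a cuckoo path and shift entries along it;
	 * returns -ENOSPC if no path is found.
	 */
	return rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
			alt_hash, new_idx);
}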