/* Try to insert the new entry into an empty slot of the primary bucket,
 * inside a hardware (TSX) transaction.
 * Returns 0 on success, -1 if the bucket is full or the transaction
 * keeps aborting.
 */
static inline int
rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i, status;
	uint32_t try = 0;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Insert new entry if there is room in the primary bucket */
			for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
				if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
					prim_bkt->sig_current[i] = sig;
					prim_bkt->sig_alt[i] = alt_hash;
					prim_bkt->key_idx[i] = new_idx;
					break;
				}
			}
			rte_xend();

			if (i != RTE_HASH_BUCKET_ENTRIES)
				return 0;
			break; /* bucket is full, give up */
		}
		/* Transaction aborted: back off and retry */
		try++;
		rte_pause();
	}

	return -1;
}
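/* Note (added for clarity, not in the original source): the helper above
 * only fills an already-empty slot in one bucket; it never displaces
 * existing entries. When it returns -1 with the bucket full, the caller is
 * expected to fall back to rte_hash_cuckoo_make_space_mw_tm() below, which
 * searches for a chain of displacements instead.
 */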
/* Shift entries along the given cuckoo path and place the new entry in the
 * slot freed at the head of the path. The whole chain of moves runs inside
 * one TSX transaction, so lookups never observe a half-shifted path.
 * Returns 0 on success, -1 if the transaction keeps aborting.
 */
static inline int
rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
		struct queue_node *leaf, uint32_t leaf_slot,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned status;
	uint32_t try = 0;
	uint32_t prev_alt_bkt_idx;
	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Walk from the empty leaf back towards the root,
			 * moving each entry one step along the path.
			 */
			while (likely(curr_node->prev != NULL)) {
				prev_node = curr_node->prev;
				prev_bkt = prev_node->bkt;
				prev_slot = curr_node->prev_slot;

				prev_alt_bkt_idx
					= prev_bkt->sig_alt[prev_slot]
						& h->bucket_bitmask;

				/* If a concurrent writer moved this entry
				 * since the BFS ran, the path is stale:
				 * abort the transaction.
				 */
				if (unlikely(&h->buckets[prev_alt_bkt_idx]
						!= curr_bkt))
					rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);

				/* Swap current/alt signatures so the moved
				 * entry can later migrate back to its primary
				 * bucket if a slot frees up there.
				 */
				curr_bkt->sig_alt[curr_slot] =
					prev_bkt->sig_current[prev_slot];
				curr_bkt->sig_current[curr_slot] =
					prev_bkt->sig_alt[prev_slot];
				curr_bkt->key_idx[curr_slot]
					= prev_bkt->key_idx[prev_slot];

				curr_slot = prev_slot;
				curr_node = prev_node;
				curr_bkt = curr_node->bkt;
			}

			/* The root slot is now free: store the new entry */
			curr_bkt->sig_current[curr_slot] = sig;
			curr_bkt->sig_alt[curr_slot] = alt_hash;
			curr_bkt->key_idx[curr_slot] = new_idx;

			rte_xend();
			return 0;
		}
		/* Aborted: the path is most likely stale, so give it up */
		try++;
		rte_pause();
	}

	return -1;
}
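/* Note (illustrative, not in the original source): inside a transaction,
 * rte_xbegin() has returned RTE_XBEGIN_STARTED; after rte_xabort(code) the
 * CPU rolls all writes back and rte_xbegin() returns an abort status
 * instead. A caller interested in why a transaction failed could inspect
 * the status bits from rte_rtm.h, e.g.:
 *
 *	if ((status & RTE_XABORT_EXPLICIT) &&
 *	    RTE_XABORT_CODE(status) == RTE_XABORT_CUCKOO_PATH_INVALIDED)
 *		... the BFS path went stale under a concurrent writer ...
 *
 * The retry loop above ignores the cause and simply backs off with
 * rte_pause(), since a stale path has to be recomputed anyway.
 */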
/* Make space for the new key by a breadth-first search over the cuckoo
 * graph; once an empty slot is found, shift entries along the discovered
 * path. Returns 0 on success, -ENOSPC if the bounded queue is exhausted.
 */
static inline int
rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
		struct rte_hash_bucket *bkt, hash_sig_t sig,
		hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo BFS; the bound leaves room to enqueue one full bucket */
	while (likely(tail != head && head <
			queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
			RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
						tail, i, sig,
						alt_hash, new_idx) == 0))
					return 0;
			}

			/* Enqueue the alternative bucket of this slot,
			 * recording how we reached it.
			 */
			alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
					& h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
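/* Usage sketch (hypothetical; meant to mirror the multi-writer insert flow
 * in rte_cuckoo_hash.c, with placeholder variable names): the cheap
 * single-bucket insert is tried first, and the BFS is only paid for when
 * the primary bucket is full.
 *
 *	if (rte_hash_cuckoo_insert_mw_tm(prim_bkt, sig, alt_hash, new_idx) == 0)
 *		return 0;
 *	if (rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig, alt_hash,
 *						new_idx) == 0)
 *		return 0;
 *	return rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, alt_hash, sig,
 *						new_idx);
 */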