/* Try to insert at one bucket (@prim_bkt) without pushing entries around. */
static inline unsigned
rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	unsigned i, status;
	unsigned try = 0;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Insert the new entry if there is room in the
			 * primary bucket.
			 */
			for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
				/* Check if the slot is available */
				if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
					prim_bkt->sig_current[i] = sig;
					prim_bkt->sig_alt[i] = alt_hash;
					prim_bkt->key_idx[i] = new_idx;
					break;
				}
			}
			rte_xend();

			if (i != RTE_HASH_BUCKET_ENTRIES)
				return 0;

			break; /* transaction committed but the bucket was full */
		}
		/* On abort, back off and retry a bounded number of times. */
		try++;
		rte_pause();
	}

	return -1;
}
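/*
 * Illustrative sketch, not part of the original listing: the generic form of
 * the transactional retry pattern used above. rte_xbegin() returns
 * RTE_XBEGIN_STARTED when a transaction has started; any other value is the
 * abort status of a transaction that has already rolled back. TSX gives no
 * forward-progress guarantee, so a bounded retry count and a
 * non-transactional fallback in the caller are mandatory. The helper name is
 * hypothetical.
 */
static inline int
tsx_protected_increment_sketch(uint32_t *counter)
{
	unsigned try;

	for (try = 0; try < RTE_HASH_TSX_MAX_RETRY; try++) {
		if (rte_xbegin() == RTE_XBEGIN_STARTED) {
			(*counter)++;	/* transactional critical section */
			rte_xend();	/* commit: all or nothing */
			return 0;
		}
		rte_pause();		/* back off before retrying */
	}
	return -1;			/* caller must fall back, e.g. to a lock */
}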
/* Shift entries along the discovered cuckoo path (@leaf, @leaf_slot) and
 * place the new entry (sig, alt_hash, new_idx) at the head of the path.
 */
static inline int
rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
			struct queue_node *leaf, uint32_t leaf_slot,
			hash_sig_t sig, hash_sig_t alt_hash,
			uint32_t new_idx)
{
	unsigned try = 0;
	unsigned status;
	uint32_t prev_alt_bkt_idx;

	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;

	while (try < RTE_HASH_TSX_MAX_RETRY) {
		status = rte_xbegin();
		if (likely(status == RTE_XBEGIN_STARTED)) {
			/* Walk from the empty leaf back towards the target
			 * bucket, moving each entry one hop forward into
			 * its alternative bucket.
			 */
			while (likely(curr_node->prev != NULL)) {
				prev_node = curr_node->prev;
				prev_bkt = prev_node->bkt;
				prev_slot = curr_node->prev_slot;

				prev_alt_bkt_idx
					= prev_bkt->sig_alt[prev_slot]
						& h->bucket_bitmask;

				/* Abort if a concurrent writer has already
				 * moved this entry and invalidated the path.
				 */
				if (unlikely(&h->buckets[prev_alt_bkt_idx]
						!= curr_bkt))
					rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);

				/* Swap current/alt signatures so a later
				 * cuckoo move can push the entry back to its
				 * primary bucket if a slot frees up there.
				 */
				curr_bkt->sig_alt[curr_slot] =
					prev_bkt->sig_current[prev_slot];
				curr_bkt->sig_current[curr_slot] =
					prev_bkt->sig_alt[prev_slot];
				curr_bkt->key_idx[curr_slot]
					= prev_bkt->key_idx[prev_slot];

				curr_slot = prev_slot;
				curr_node = prev_node;
				curr_bkt = curr_node->bkt;
			}

			/* The head of the path is now empty: store the new
			 * entry there and commit.
			 */
			curr_bkt->sig_current[curr_slot] = sig;
			curr_bkt->sig_alt[curr_slot] = alt_hash;
			curr_bkt->key_idx[curr_slot] = new_idx;

			rte_xend();

			return 0;
		}

		/* On abort the path is most likely stale (TSX detected a
		 * data conflict), so back off and retry.
		 */
		try++;
		rte_pause();
	}

	return -1;
}
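/*
 * Illustrative sketch, not part of the original listing: how an abort status
 * from rte_xbegin() can be decoded. When a transaction aborts explicitly via
 * rte_xabort(code), as with RTE_XABORT_CUCKOO_PATH_INVALIDED above, the
 * RTE_XABORT_EXPLICIT bit is set in the returned status and RTE_XABORT_CODE()
 * extracts the code (both from DPDK's rte_rtm.h). The helper name is
 * hypothetical.
 */
static inline int
tsx_path_invalidated_sketch(unsigned status)
{
	/* Distinguish our explicit path-invalidated abort from an abort
	 * caused by a genuine data conflict or capacity overflow.
	 */
	return (status & RTE_XABORT_EXPLICIT) &&
		RTE_XABORT_CODE(status) == RTE_XABORT_CUCKOO_PATH_INVALIDED;
}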
/* Make space for the new key by running a BFS over the cuckoo graph, then
 * shifting entries along the discovered path inside a transaction.
 */
static inline int
rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			hash_sig_t sig, hash_sig_t alt_hash,
			uint32_t new_idx)
{
	unsigned i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo BFS: stop when the queue is empty or nearly full */
	while (likely(tail != head && head <
					queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
					RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				/* Found an empty leaf: try to shift entries
				 * along the path that ends here.
				 */
				if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
						tail, i, sig,
						alt_hash, new_idx) == 0))
					return 0;
			}

			/* Enqueue the alternative bucket of this slot,
			 * remembering the edge back to the parent node.
			 */
			alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
						& h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
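/*
 * Illustrative sketch, not part of the original listing: how the multi-writer
 * insert path chains the three helpers above, modeled on DPDK's
 * __rte_hash_add_key_with_hash(). The helper name and exact signature are
 * assumptions; in rte_hash, key_idx values are stored offset by one (0 marks
 * EMPTY_SLOT), hence the "new_idx - 1" on success.
 */
static inline int32_t
hash_add_key_tm_sketch(const struct rte_hash *h,
		struct rte_hash_bucket *prim_bkt,
		struct rte_hash_bucket *sec_bkt,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
{
	/* Fast path: a free slot in the primary bucket. */
	if (rte_hash_cuckoo_insert_mw_tm(prim_bkt, sig, alt_hash,
						new_idx) == 0)
		return new_idx - 1;

	/* Primary bucket full: BFS from the primary bucket... */
	if (rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig, alt_hash,
						new_idx) == 0)
		return new_idx - 1;

	/* ...then from the secondary bucket, with the signatures swapped. */
	if (rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, alt_hash, sig,
						new_idx) == 0)
		return new_idx - 1;

	return -ENOSPC;	/* all cuckoo paths exhausted; the table is full here */
}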