#ifndef _RTE_CUCKOO_HASH_H_
#define _RTE_CUCKOO_HASH_H_

#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif

#if defined(RTE_ARCH_ARM64)
#include "rte_cmp_arm64.h"
#endif

/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_16_BYTES,
	KEY_32_BYTES,
	KEY_48_BYTES,
	KEY_64_BYTES,
	KEY_80_BYTES,
	KEY_96_BYTES,
	KEY_112_BYTES,
	KEY_128_BYTES,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};
#else
/*
 * On other architectures only the custom compare function and the
 * generic memcmp() fallback are available.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};
#endif
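/*
 * Illustrative sketch, not part of the original header: how a creation
 * path can map a configured key length to one of the jump-table cases
 * above. The helper name is hypothetical and the switch assumes the
 * x86/arm64 variant of the enum; the real selection happens when the
 * table is created.
 */
static inline enum cmp_jump_table_case
pick_cmp_case_sketch(uint32_t key_len)
{
	switch (key_len) {
	case 16:  return KEY_16_BYTES;
	case 32:  return KEY_32_BYTES;
	case 48:  return KEY_48_BYTES;
	case 64:  return KEY_64_BYTES;
	case 80:  return KEY_80_BYTES;
	case 96:  return KEY_96_BYTES;
	case 112: return KEY_112_BYTES;
	case 128: return KEY_128_BYTES;
	default:  return KEY_OTHER_BYTES; /* fall back to memcmp() */
	}
}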
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES		8

#if !RTE_IS_POWER_OF_2(RTE_HASH_BUCKET_ENTRIES)
#error RTE_HASH_BUCKET_ENTRIES must be a power of 2
#endif

#define NULL_SIGNATURE			0

#define KEY_ALIGNMENT			16

#define LCORE_CACHE_SIZE		64

#define RTE_HASH_BFS_QUEUE_MAX_LEN	1000

#define RTE_XABORT_CUCKOO_PATH_INVALIDED 0x4

#define RTE_HASH_TSX_MAX_RETRY		10

/* Per-lcore cache of free key-store slot indexes. */
struct __rte_cache_aligned lcore_cache {
	unsigned int len;			/**< Cache length. */
	uint32_t objs[LCORE_CACHE_SIZE];	/**< Cached slot indexes. */
};
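/*
 * Illustrative sketch, not from the original file: a writer allocates a
 * free key-store slot from its lcore cache first and refills the cache
 * in bulk from the shared ring only when the cache runs dry, which
 * amortizes ring contention over many allocations. Assumes
 * <rte_ring_elem.h> is included; index 0 is used here as a "no slot"
 * sentinel since slot 0 is reserved as the empty marker.
 */
static inline uint32_t
alloc_slot_sketch(struct rte_ring *free_slots, struct lcore_cache *cache)
{
	if (cache->len == 0) {
		/* Refill half the cache in one bulk ring operation. */
		cache->len = rte_ring_mc_dequeue_burst_elem(free_slots,
				cache->objs, sizeof(uint32_t),
				LCORE_CACHE_SIZE / 2, NULL);
		if (cache->len == 0)
			return 0; /* key store exhausted */
	}
	return cache->objs[--cache->len];
}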
/* Structure that stores the key-value pair. */
struct rte_hash_key {
	RTE_ATOMIC(void *) pdata;	/**< User data associated with the key. */
	/* Variable key size */
	char key[0];
};
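/*
 * Illustrative sketch, not from the original file: key-store entries
 * are addressed by index rather than by pointer. Entry i lives at
 * offset i * key_entry_size, where key_entry_size is
 * sizeof(struct rte_hash_key) plus the key length, padded to
 * KEY_ALIGNMENT; index 0 is reserved as the empty marker.
 */
static inline struct rte_hash_key *
key_slot_sketch(void *key_store, uint32_t key_entry_size, uint32_t key_idx)
{
	return (struct rte_hash_key *)
		((char *)key_store + (size_t)key_idx * key_entry_size);
}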
/** Bucket structure. */
struct __rte_cache_aligned rte_hash_bucket {
	uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];

	RTE_ATOMIC(uint32_t) key_idx[RTE_HASH_BUCKET_ENTRIES];

	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];

	void *next;	/**< Link to the next chained (extendable) bucket. */
};
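/*
 * Illustrative sketch, not from the original file: a lookup compares
 * the 16-bit short signature of all RTE_HASH_BUCKET_ENTRIES slots
 * before ever touching the key store, and the alternate (cuckoo)
 * bucket index is derived from the current bucket index and the
 * signature, so it can be recomputed from either bucket. Helper names
 * are hypothetical; full-key verification on a signature hit is
 * omitted.
 */
static inline uint32_t
alt_bucket_idx_sketch(uint32_t cur_bkt_idx, uint16_t sig,
		uint32_t bucket_bitmask)
{
	return (cur_bkt_idx ^ sig) & bucket_bitmask;
}

static inline uint32_t
search_bucket_sketch(const struct rte_hash_bucket *bkt, uint16_t sig)
{
	unsigned int i;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* A zero key_idx marks an empty slot. */
		if (bkt->sig_current[i] == sig && bkt->key_idx[i] != 0)
			return bkt->key_idx[i]; /* candidate, verify full key */
	}
	return 0; /* no signature match in this bucket */
}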
/* Function-pointer types used below; declared in rte_hash.h, along with
 * RTE_HASH_NAMESIZE. __rte_cache_aligned comes from rte_common.h.
 */
typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len,
		uint32_t init_val);
typedef int (*rte_hash_cmp_eq_t)(const void *key1, const void *key2,
		size_t key_len);

/** A hash table structure. */
struct __rte_cache_aligned rte_hash {
	char name[RTE_HASH_NAMESIZE];	/**< Name of the hash. */

	struct rte_ring *free_slots;
	/**< Ring storing indexes of the free slots in the key table. */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots. */

	/* RCU config */
	struct rte_hash_rcu_config *hash_rcu_cfg;
	/**< HASH RCU QSBR configuration structure. */
	struct rte_rcu_qsbr_dq *dq;	/**< RCU QSBR defer queue. */

	/* Fields used in lookup. */
	alignas(RTE_CACHE_LINE_SIZE) uint32_t key_len;
	/**< Length of hash key. */
	uint8_t hw_trans_mem_support;
	/**< If hardware transactional memory is used. */
	uint8_t readwrite_concur_support;
	/**< If read-write concurrency support is enabled. */
	uint8_t ext_table_support;
	/**< Enable extendable bucket table. */
	uint8_t readwrite_concur_lf_support;
	/**< If lock-free read-write concurrency support is enabled. */
	uint8_t writer_takes_lock;
	/**< Indicates if writer threads need to take the lock. */
	rte_hash_function hash_func;	/**< Function used to calculate the hash. */
	uint32_t hash_func_init_val;	/**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
	/**< Custom function used to compare keys. */
	enum cmp_jump_table_case cmp_jump_table_idx;
	/**< Indicates which compare function to use. */

	struct rte_hash_bucket *buckets;
	/**< Table of buckets storing hash values and key indexes
	 * into the key table.
	 */
	rte_rwlock_t *readwrite_lock;	/**< Read-write lock (thread-safe). */
	struct rte_hash_bucket *buckets_ext;	/**< Extra buckets array. */
	struct rte_ring *free_ext_bkts;	/**< Ring of indexes of free ext buckets. */
	uint32_t *ext_bkt_to_free;
	/**< Index of an empty ext bucket to be recycled on calling
	 * rte_hash_del_xxx APIs. With lock-free read-write concurrency,
	 * readers may still traverse an emptied ext bucket, so its release
	 * is piggy-backed onto freeing of the key index.
	 */
};

/* Node in the breadth-first search queue used to find a cuckoo path. */
struct queue_node {
	struct rte_hash_bucket *bkt;	/* Current bucket on the BFS search. */
	uint32_t cur_bkt_idx;

	struct queue_node *prev;	/* Parent node in the BFS algorithm. */
	int prev_slot;			/* Parent node slot in parent bucket. */
};

/** @internal Default number of RCU defer-queue entries to reclaim at once. */
#define RTE_HASH_RCU_DQ_RECLAIM_MAX	16
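/*
 * Illustrative sketch, not from the original file and simplified from
 * the real insert path: when both candidate buckets of a new key are
 * full, a breadth-first search of at most RTE_HASH_BFS_QUEUE_MAX_LEN
 * queue_node entries looks for any bucket with an empty slot; the
 * caller then walks prev/prev_slot backwards, moving one entry per hop,
 * until a slot opens up in the original bucket. The queue argument must
 * point to RTE_HASH_BFS_QUEUE_MAX_LEN nodes; locking, the lock-free
 * variant, and extendable buckets are omitted.
 */
static inline struct queue_node *
bfs_find_empty_sketch(struct queue_node *queue,
		struct rte_hash_bucket *bkts, uint32_t bucket_bitmask,
		struct rte_hash_bucket *start_bkt, uint32_t start_idx)
{
	unsigned int head = 0, tail = 0, i;

	queue[tail++] = (struct queue_node){ .bkt = start_bkt,
		.cur_bkt_idx = start_idx, .prev = NULL, .prev_slot = -1 };

	while (head < tail && tail < RTE_HASH_BFS_QUEUE_MAX_LEN - 1) {
		struct queue_node *node = &queue[head++];

		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (node->bkt->key_idx[i] == 0)
				return node; /* backtrack via prev/prev_slot */

			/* Enqueue the alternate bucket of the occupant. */
			uint32_t alt_idx = alt_bucket_idx_sketch(
					node->cur_bkt_idx,
					node->bkt->sig_current[i],
					bucket_bitmask);
			queue[tail++] = (struct queue_node){
				.bkt = &bkts[alt_idx],
				.cur_bkt_idx = alt_idx,
				.prev = node, .prev_slot = (int)i };
			if (tail >= RTE_HASH_BFS_QUEUE_MAX_LEN - 1)
				break;
		}
	}
	return NULL; /* no cuckoo path found, caller grows or fails */
}

#endif /* _RTE_CUCKOO_HASH_H_ */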