enum rte_pmd_i40e_package_op {
    RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
    /* ... */
    RTE_PMD_I40E_PKG_OP_MAX = 32
};

enum rte_pmd_i40e_package_info {
    RTE_PMD_I40E_PKG_INFO_UNDEFINED = 0,
    RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER,
    RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE,
    RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES,
    RTE_PMD_I40E_PKG_INFO_GLOBAL_MAX = 1024,
    RTE_PMD_I40E_PKG_INFO_HEADER,
    RTE_PMD_I40E_PKG_INFO_DEVID_NUM,
    RTE_PMD_I40E_PKG_INFO_DEVID_LIST,
    RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM,
    RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST,
    RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM,
    RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST,
    RTE_PMD_I40E_PKG_INFO_PTYPE_NUM,
    RTE_PMD_I40E_PKG_INFO_PTYPE_LIST,
    RTE_PMD_I40E_PKG_INFO_MAX = (int)0xFFFFFFFF
};
enum rte_pmd_i40e_queue_region_op {
    RTE_PMD_I40E_RSS_QUEUE_REGION_UNDEFINED,
    /* ... */
    RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET,
    RTE_PMD_I40E_RSS_QUEUE_REGION_OP_MAX
};

#define RTE_PMD_I40E_DDP_NAME_SIZE 32
#define RTE_PMD_I40E_PCTYPE_MAX 64
#define RTE_PMD_I40E_REGION_MAX_NUM 8
#define RTE_PMD_I40E_MAX_USER_PRIORITY 8
struct rte_pmd_i40e_ddp_device_id {
    uint32_t vendor_dev_id;
    uint32_t sub_vendor_dev_id;
};

    uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE]; /* profile name */

#define RTE_PMD_I40E_DDP_OWNER_UNKNOWN 0xFF
#define RTE_PMD_I40E_PROTO_NUM 6
#define RTE_PMD_I40E_PROTO_UNUSED 0xFF

    char name[RTE_PMD_I40E_DDP_NAME_SIZE]; /* protocol name */
    uint8_t protocols[RTE_PMD_I40E_PROTO_NUM]; /* protocols of a packet type */

#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000

struct rte_pmd_i40e_ptype_mapping {
    uint16_t hw_ptype; /**< hardware defined packet type */
    uint32_t sw_ptype; /**< software defined packet type */
};
struct rte_pmd_i40e_queue_region_info {
    uint8_t region_id;         /**< region ID */
    uint8_t queue_start_index; /**< first queue of this region */
    uint8_t queue_num;         /**< number of queues in this region */
    uint8_t user_priority_num; /**< number of user priorities mapped here */
    uint8_t user_priority[RTE_PMD_I40E_MAX_USER_PRIORITY];
    uint8_t flowtype_num;      /**< number of pctypes mapped here */
    uint8_t hw_flowtype[RTE_PMD_I40E_PCTYPE_MAX];
};

struct rte_pmd_i40e_queue_regions {
    uint16_t queue_region_number; /**< number of configured regions */
    struct rte_pmd_i40e_queue_region_info
        region[RTE_PMD_I40E_REGION_MAX_NUM];
};
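/*
 * Illustrative sketch (not part of the original header): reading back the
 * current queue region configuration through
 * rte_pmd_i40e_rss_queue_region_conf() (whose prototype is elided from this
 * excerpt) with the RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET operation. The
 * helper name dump_queue_regions() and the port number are examples only.
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   dump_queue_regions(uint16_t port_id)
 *   {
 *       struct rte_pmd_i40e_queue_regions regions;
 *       int ret, i;
 *
 *       memset(&regions, 0, sizeof(regions));
 *       ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
 *               RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET, &regions);
 *       if (ret < 0)
 *           return ret;
 *
 *       for (i = 0; i < regions.queue_region_number; i++)
 *           printf("region %u: first queue %u, %u queue(s)\n",
 *                  regions.region[i].region_id,
 *                  regions.region[i].queue_start_index,
 *                  regions.region[i].queue_num);
 *       return 0;
 *   }
 */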
enum rte_pmd_i40e_pkt_template_behavior {
    RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT,
    RTE_PMD_I40E_PKT_TEMPLATE_REJECT,
    RTE_PMD_I40E_PKT_TEMPLATE_PASSTHRU,
};
enum rte_pmd_i40e_inset_type { /* ... */ };

struct rte_pmd_i40e_inset_mask { /* ... */ };

struct rte_pmd_i40e_inset {
    uint64_t inset;
    struct rte_pmd_i40e_inset_mask mask[2];
};
int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
                                    uint64_t vf_mask, uint8_t on);
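/*
 * Illustrative sketch: enabling a VLAN filter for a subset of VFs with the
 * prototype above. vf_mask carries one bit per VF ID; the port, VLAN ID and
 * VF numbers used here are arbitrary examples.
 *
 *   #include <stdio.h>
 *   #include <rte_pmd_i40e.h>
 *
 *   // Allow VLAN 100 on VF 0 and VF 2 of port 0.
 *   int ret = rte_pmd_i40e_set_vf_vlan_filter(0, 100,
 *                                             (1ULL << 0) | (1ULL << 2), 1);
 *   if (ret < 0)
 *       printf("VLAN filter update failed: %d\n", ret);
 */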
int rte_pmd_i40e_get_ddp_info(uint8_t *pkg, uint32_t pkg_size,
    uint8_t *info, uint32_t size, enum rte_pmd_i40e_package_info type);
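/*
 * Illustrative sketch: inspecting a DDP profile image and then applying it to
 * a port. rte_pmd_i40e_process_ddp_package() and the
 * RTE_PMD_I40E_PKG_OP_WR_ADD enumerator belong to the full header and are
 * elided from this excerpt; the buffer is assumed to already contain the raw
 * bytes of a .pkg file.
 *
 *   #include <stdio.h>
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   add_ddp_profile(uint16_t port_id, uint8_t *pkg, uint32_t pkg_size)
 *   {
 *       uint32_t devid_num = 0;
 *       int ret;
 *
 *       // How many device IDs does this profile list?
 *       ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
 *               (uint8_t *)&devid_num, sizeof(devid_num),
 *               RTE_PMD_I40E_PKG_INFO_DEVID_NUM);
 *       if (ret < 0)
 *           return ret;
 *       printf("profile supports %u device id(s)\n", devid_num);
 *
 *       // Download the profile and add it to the port's profile list.
 *       return rte_pmd_i40e_process_ddp_package(port_id, pkg, pkg_size,
 *               RTE_PMD_I40E_PKG_OP_WR_ADD);
 *   }
 */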
int rte_pmd_i40e_ptype_mapping_update(uint16_t port,
    struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t count, uint8_t exclusive);

int rte_pmd_i40e_ptype_mapping_get(uint16_t port,
    struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t size, uint16_t *count, uint8_t valid_only);
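/*
 * Illustrative sketch: updating a single hardware-to-software packet type
 * mapping with the prototypes above. The hardware ptype index (38) is an
 * example only; valid values depend on the DDP profile loaded on the port.
 *
 *   #include <rte_mbuf_ptype.h>
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   map_one_ptype(uint16_t port_id)
 *   {
 *       struct rte_pmd_i40e_ptype_mapping map = {
 *           .hw_ptype = 38, // example hardware packet type index
 *           .sw_ptype = RTE_PTYPE_L2_ETHER |
 *                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 *                       RTE_PTYPE_L4_TCP,
 *       };
 *
 *       // exclusive = 0: only the entry for hw_ptype 38 is modified.
 *       return rte_pmd_i40e_ptype_mapping_update(port_id, &map, 1, 0);
 *   }
 */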
#define RTE_PMD_I40E_PCTYPE_MAX 64
#define RTE_PMD_I40E_FLOW_TYPE_MAX 64

struct rte_pmd_i40e_flow_type_mapping {
    uint16_t flow_type; /**< software defined flow type */
    uint64_t pctype;    /**< hardware defined pctype */
};

int rte_pmd_i40e_flow_type_mapping_update(uint16_t port,
    struct rte_pmd_i40e_flow_type_mapping *mapping_items, uint16_t count, uint8_t exclusive);

int rte_pmd_i40e_flow_type_mapping_get(uint16_t port,
    struct rte_pmd_i40e_flow_type_mapping *mapping_items);
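/*
 * Illustrative sketch: binding a software flow type to a hardware pctype and
 * reading the whole mapping table back. The flow type (63) and pctype index
 * (38) are examples only; pctype is a bit mask, one bit per hardware pctype.
 *
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   remap_flow_type(uint16_t port_id)
 *   {
 *       struct rte_pmd_i40e_flow_type_mapping map = {
 *           .flow_type = 63,      // example software flow type
 *           .pctype = 1ULL << 38, // example hardware pctype
 *       };
 *       struct rte_pmd_i40e_flow_type_mapping table[RTE_PMD_I40E_FLOW_TYPE_MAX];
 *       int ret;
 *
 *       // exclusive = 0: leave the other flow type entries untouched.
 *       ret = rte_pmd_i40e_flow_type_mapping_update(port_id, &map, 1, 0);
 *       if (ret < 0)
 *           return ret;
 *
 *       // table[] is indexed by flow type after a successful get().
 *       return rte_pmd_i40e_flow_type_mapping_get(port_id, table);
 *   }
 */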
int rte_pmd_i40e_cfg_hash_inset(uint16_t port,
                                uint64_t pctype, uint64_t inset);
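/*
 * Illustrative sketch: programming the hash input set of one pctype with the
 * declaration above, building the 64-bit inset value with the static inline
 * field helpers declared further below. The pctype index (31) and field
 * indexes (15 and 16) are examples only; their meaning is defined by the
 * hardware field vector.
 *
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   set_hash_inset_example(uint16_t port_id)
 *   {
 *       uint64_t inset = 0;
 *
 *       rte_pmd_i40e_inset_field_set(&inset, 15);
 *       rte_pmd_i40e_inset_field_set(&inset, 16);
 *
 *       return rte_pmd_i40e_cfg_hash_inset(port_id, 31, inset);
 *   }
 */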
int rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype,
                           struct rte_pmd_i40e_inset *inset,
                           enum rte_pmd_i40e_inset_type inset_type);

int rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
                           struct rte_pmd_i40e_inset *inset,
                           enum rte_pmd_i40e_inset_type inset_type);
    /* rte_pmd_i40e_inset_field_get(): field_idx 0 is the most significant bit */
    bit_idx = 63 - field_idx;
    if (inset & (1ULL << bit_idx))
        return 1;

    /* rte_pmd_i40e_inset_field_set(): mark the field as part of the input set */
    bit_idx = 63 - field_idx;
    *inset = *inset | (1ULL << bit_idx);

    /* rte_pmd_i40e_inset_field_clear(): remove the field from the input set */
    bit_idx = 63 - field_idx;
    *inset = *inset & ~(1ULL << bit_idx);
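/*
 * Illustrative sketch: read-modify-write of an input set using the
 * declarations above. INSET_HASH is assumed to be one of the
 * rte_pmd_i40e_inset_type enumerators elided from this excerpt; the pctype
 * (31) and field index (30) are examples only.
 *
 *   #include <rte_pmd_i40e.h>
 *
 *   static int
 *   enable_inset_field(uint16_t port_id)
 *   {
 *       struct rte_pmd_i40e_inset inset;
 *       int ret;
 *
 *       ret = rte_pmd_i40e_inset_get(port_id, 31, &inset, INSET_HASH);
 *       if (ret < 0)
 *           return ret;
 *
 *       // Add field 30 to the input set if it is not already there.
 *       if (!rte_pmd_i40e_inset_field_get(inset.inset, 30))
 *           rte_pmd_i40e_inset_field_set(&inset.inset, 30);
 *
 *       return rte_pmd_i40e_inset_set(port_id, 31, &inset, INSET_HASH);
 *   }
 */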