#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__

#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"

#ifndef DEFAULT_BURST_SIZE
#define DEFAULT_BURST_SIZE			32
#endif

#ifndef FLUSH_COUNT_THRESHOLD
#define FLUSH_COUNT_THRESHOLD			(1 << 17)
#endif

struct default_internals {
	struct rte_mbuf **pkts;
	uint32_t pkts_len;
	uint32_t txq_pos;
	uint32_t flush_count;
};

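/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical. It assumes the default (non-TM) datapath buffers mbufs in
 * default_internals.pkts until a full DEFAULT_BURST_SIZE is collected, while
 * flush_count tracks how long a partial burst has been waiting; once
 * FLUSH_COUNT_THRESHOLD is crossed, the partial burst is forced out so
 * packets are not held back indefinitely.
 */
static inline int
default_flush_due(struct default_internals *def)
{
	if (def->pkts_len >= DEFAULT_BURST_SIZE)
		return 1; /* full burst ready */

	def->flush_count++;
	if (def->flush_count >= FLUSH_COUNT_THRESHOLD && def->pkts_len > 0) {
		def->flush_count = 0;
		return 1; /* force out the partial burst */
	}

	return 0;
}
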
#ifndef TM_MAX_SUBPORTS
#define TM_MAX_SUBPORTS				8
#endif

#ifndef TM_MAX_PIPES_PER_SUBPORT
#define TM_MAX_PIPES_PER_SUBPORT		4096
#endif

struct tm_params {
	struct rte_sched_port_params port_params;

	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];

	struct rte_sched_pipe_params
		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
	uint32_t n_pipe_profiles;
	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
};

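/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical. pipe_to_profile is a flat per-port lookup table, assumed here
 * to be indexed by (subport_id, pipe_id) as shown below, mapping every pipe
 * to one of the n_pipe_profiles entries in pipe_profiles[].
 */
static inline uint32_t
tm_pipe_profile_id_get(struct tm_params *t, uint32_t subport_id,
	uint32_t pipe_id)
{
	return t->pipe_to_profile[subport_id * TM_MAX_PIPES_PER_SUBPORT +
		pipe_id];
}
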
enum tm_node_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};

struct tm_shaper_profile {
	TAILQ_ENTRY(tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t n_users;
	struct rte_tm_shaper_params params;
};

TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);

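/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical. Each of the profile/shaper/node lists below is a standard
 * <sys/queue.h> TAILQ, so lookup by ID is a linear walk over the list head.
 */
static inline struct tm_shaper_profile *
tm_shaper_profile_find(struct tm_shaper_profile_list *spl,
	uint32_t shaper_profile_id)
{
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (sp->shaper_profile_id == shaper_profile_id)
			return sp;

	return NULL;
}
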
struct tm_shared_shaper {
	TAILQ_ENTRY(tm_shared_shaper) node;
	uint32_t shared_shaper_id;

	uint32_t shaper_profile_id;
};

TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);

struct tm_wred_profile {
	TAILQ_ENTRY(tm_wred_profile) node;
	uint32_t wred_profile_id;
	uint32_t n_users;
	struct rte_tm_wred_params params;
};

TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);

struct tm_node {
	TAILQ_ENTRY(tm_node) node;
	uint32_t node_id;
	uint32_t parent_node_id;
	uint32_t priority;
	uint32_t weight;
	uint32_t level;
	struct tm_node *parent_node;
	struct tm_shaper_profile *shaper_profile;
	struct tm_wred_profile *wred_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(tm_node_list, tm_node);

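/*
 * Illustrative sketch, not part of the original header; the helper name is
 * hypothetical. Nodes link upward through parent_node, so any node can be
 * walked back to the single port-level root of the hierarchy (the levels are
 * those of enum tm_node_level above).
 */
static inline struct tm_node *
tm_node_root_get(struct tm_node *n)
{
	while (n->parent_node != NULL)
		n = n->parent_node; /* the port-level node has no parent */

	return n;
}
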
struct tm_hierarchy {
	struct tm_shaper_profile_list shaper_profiles;
	struct tm_shared_shaper_list shared_shapers;
	struct tm_wred_profile_list wred_profiles;
	struct tm_node_list nodes;

	uint32_t n_shaper_profiles;
	uint32_t n_shared_shapers;
	uint32_t n_wred_profiles;
	uint32_t n_nodes;

	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
};

struct tm_internals {
	/** Hierarchy specification: unfrozen at init and when the port is
	 *  stopped, frozen on successful hierarchy commit; run-time hierarchy
	 *  changes are not allowed.
	 */
	struct tm_hierarchy h;
	int hierarchy_frozen;

	/** Blueprints for the rte_sched port configuration */
	struct tm_params params;

	/** Run-time */
	struct rte_sched_port *sched;
	struct rte_mbuf **pkts_enq;
	struct rte_mbuf **pkts_deq;
	uint32_t pkts_enq_len;
	uint32_t flush_count;
};

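/*
 * Illustrative sketch, not part of the original header; the helper name and
 * the enq_bsz parameter are hypothetical. It assumes the TM datapath
 * accumulates mbufs in pkts_enq and pushes them into the rte_sched port once
 * a full enqueue burst is available or the flush threshold is hit;
 * rte_sched_port_dequeue() later pulls the scheduled packets into pkts_deq
 * for transmission on the hard device.
 */
static inline void
tm_run_enqueue(struct tm_internals *tm, uint32_t enq_bsz)
{
	/* Keep accumulating until a full burst or the flush threshold. */
	if (tm->pkts_enq_len < enq_bsz &&
		++tm->flush_count < FLUSH_COUNT_THRESHOLD)
		return;

	if (tm->pkts_enq_len)
		rte_sched_port_enqueue(tm->sched, tm->pkts_enq,
			tm->pkts_enq_len);

	tm->pkts_enq_len = 0;
	tm->flush_count = 0;
}
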
struct pmd_internals {
	/** Params */
	struct pmd_params params;

	/** Soft device */
	struct {
		struct default_internals def; /**< Default datapath */
		struct tm_internals tm; /**< Traffic Management datapath */
	} soft;
};

struct pmd_rx_queue {
	/** Hard device */
	struct {
		uint16_t port_id;
		uint16_t rx_queue_id;
	} hard;
};

extern const struct rte_tm_ops pmd_tm_ops;

int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);

int
tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

static inline int
tm_enabled(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM);
}

static inline int
tm_used(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM) &&
		p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}

#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */