DPDK  17.11.10
rte_eth_softnic_internals.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__

#include <stdint.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"

/** Device capability features */
enum pmd_feature {
	PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
};

#ifndef INTRUSIVE
#define INTRUSIVE 0
#endif

/** Device (PMD-level) parameters */
struct pmd_params {
	/** Parameters of the soft device (to be created) */
	struct {
		const char *name; /**< Name */
		uint32_t flags; /**< Feature flags (enum pmd_feature) */

		/** 0 = access the hard device through its public API only;
		 *  1 = direct access to the hard device private data
		 *  structures is allowed (potentially faster).
		 */
		int intrusive;

		/** Traffic Management (TM) */
		struct {
			uint32_t rate; /**< Rate (bytes/second) */
			uint32_t nb_queues; /**< Number of queues */
			uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
			/**< Queue size per traffic class */
			uint32_t enq_bsz; /**< Enqueue burst size */
			uint32_t deq_bsz; /**< Dequeue burst size */
		} tm;
	} soft;

	/** Parameters of the existing hard device */
	struct {
		char *name; /**< Name */
		uint16_t tx_queue_id; /**< TX queue ID */
	} hard;
};

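For reference, the sketch below shows how a pmd_params instance could be filled in before creating the soft device. It is an illustration only: the device names, rate, queue counts and burst sizes are assumed values, not values mandated by the driver.

/* Hypothetical configuration; every literal below is an illustrative
 * assumption. */
static struct pmd_params example_params = {
	.soft = {
		.name = "soft0",                   /* assumed soft device name */
		.flags = PMD_FEATURE_TM,           /* request the TM feature */
		.intrusive = INTRUSIVE,
		.tm = {
			.rate = 1250000000,        /* 10 Gbps in bytes/second */
			.nb_queues = 65536,        /* total number of TM queues */
			.qsize = {64, 64, 64, 64}, /* per-traffic-class queue sizes */
			.enq_bsz = 32,             /* enqueue burst size */
			.deq_bsz = 32,             /* dequeue burst size */
		},
	},
	.hard = {
		.name = "0000:02:00.0",            /* assumed hard device name */
		.tx_queue_id = 0,
	},
};
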
#ifndef DEFAULT_BURST_SIZE
#define DEFAULT_BURST_SIZE 32
#endif

#ifndef FLUSH_COUNT_THRESHOLD
#define FLUSH_COUNT_THRESHOLD (1 << 17)
#endif

/** Run-time state of the default (non-TM) packet path */
struct default_internals {
	struct rte_mbuf **pkts; /**< Buffer of packets pending TX */
	uint32_t pkts_len; /**< Number of packets currently buffered */
	uint32_t txq_pos; /**< Current TX queue position */
	uint32_t flush_count; /**< Iterations since the last flush */
};

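The fields above are pure state; the logic that drives them lives in the PMD itself. As an assumed illustration only, one plausible way such a buffered path works is to transmit once a full burst is available, or once FLUSH_COUNT_THRESHOLD iterations have passed without a flush. The helper below is hypothetical and not part of this header.

/* Assumed illustration: one plausible buffered-flush policy built on
 * the default_internals fields. The real driver logic may differ. */
static void
default_path_flush(struct default_internals *def, uint16_t hard_port_id,
	uint16_t hard_tx_queue_id)
{
	uint16_t sent, i;

	if (def->pkts_len == 0) {
		def->flush_count = 0;
		return;
	}

	/* Flush on a full burst, or when the path has gone too long
	 * without transmitting anything. */
	if ((def->pkts_len >= DEFAULT_BURST_SIZE) ||
		(def->flush_count >= FLUSH_COUNT_THRESHOLD)) {
		sent = rte_eth_tx_burst(hard_port_id, hard_tx_queue_id,
			def->pkts, (uint16_t)def->pkts_len);

		/* Drop whatever the hard device did not accept. */
		for (i = sent; i < def->pkts_len; i++)
			rte_pktmbuf_free(def->pkts[i]);

		def->pkts_len = 0;
		def->flush_count = 0;
		return;
	}

	def->flush_count++;
}
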
#ifndef TM_MAX_SUBPORTS
#define TM_MAX_SUBPORTS 8
#endif

#ifndef TM_MAX_PIPES_PER_SUBPORT
#define TM_MAX_PIPES_PER_SUBPORT 4096
#endif

/** TM configuration handed to the rte_sched scheduler */
struct tm_params {
	struct rte_sched_port_params port_params;

	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];

	struct rte_sched_pipe_params
		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
	uint32_t n_pipe_profiles;
	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
};

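The pipe_to_profile[] array is flat. A reasonable assumption, implied by its dimensions, is that the profile of a given (subport, pipe) pair sits at index subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id; the accessor below is a sketch of that indexing, not a helper defined by this header.

/* Assumed indexing scheme for the flat pipe_to_profile[] array. */
static inline uint32_t
tm_pipe_profile_get(struct tm_params *t, uint32_t subport_id,
	uint32_t pipe_id)
{
	return t->pipe_to_profile[subport_id * TM_MAX_PIPES_PER_SUBPORT +
		pipe_id];
}
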
/* TM Levels */
enum tm_node_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};

/* TM Shaper Profile */
struct tm_shaper_profile {
	TAILQ_ENTRY(tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t n_users;
	struct rte_tm_shaper_params params;
};

TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);

/* TM Shared Shaper */
struct tm_shared_shaper {
	TAILQ_ENTRY(tm_shared_shaper) node;
	uint32_t shared_shaper_id;
	uint32_t n_users;
	uint32_t shaper_profile_id;
};

TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);

/* TM WRED Profile */
struct tm_wred_profile {
	TAILQ_ENTRY(tm_wred_profile) node;
	uint32_t wred_profile_id;
	uint32_t n_users;
	struct rte_tm_wred_params params;
};

TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);

/* TM Node */
struct tm_node {
	TAILQ_ENTRY(tm_node) node;
	uint32_t node_id;
	uint32_t parent_node_id;
	uint32_t priority;
	uint32_t weight;
	uint32_t level;
	struct tm_node *parent_node;
	struct tm_shaper_profile *shaper_profile;
	struct tm_wred_profile *wred_profile;
	struct rte_tm_node_params params;
	struct rte_tm_node_stats stats;
	uint32_t n_children;
};

TAILQ_HEAD(tm_node_list, tm_node);

/* TM Hierarchy Specification */
struct tm_hierarchy {
	struct tm_shaper_profile_list shaper_profiles;
	struct tm_shared_shaper_list shared_shapers;
	struct tm_wred_profile_list wred_profiles;
	struct tm_node_list nodes;

	uint32_t n_shaper_profiles;
	uint32_t n_shared_shapers;
	uint32_t n_wred_profiles;
	uint32_t n_nodes;

	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
};

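The four lists in tm_hierarchy are ordinary <sys/queue.h> intrusive tail queues, so looking an object up by ID is a linear walk. As a sketch (the driver's own lookup helpers may differ), a shaper profile lookup could be written as:

/* Sketch of an ID lookup over the shaper profile list. */
static struct tm_shaper_profile *
shaper_profile_find(struct tm_hierarchy *h, uint32_t shaper_profile_id)
{
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, &h->shaper_profiles, node)
		if (sp->shaper_profile_id == shaper_profile_id)
			return sp;

	return NULL;
}
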
/** Traffic Management (TM) run-time state */
struct tm_internals {
	/** Hierarchy specification, built through the rte_tm API */
	struct tm_hierarchy h;
	int hierarchy_frozen; /**< Set once the hierarchy is committed */

	/** Blueprint for the rte_sched scheduler configuration */
	struct tm_params params;

	/** Run-time state */
	struct rte_sched_port *sched; /**< Scheduler instance */
	struct rte_mbuf **pkts_enq; /**< Enqueue packet buffer */
	struct rte_mbuf **pkts_deq; /**< Dequeue packet buffer */
	uint32_t pkts_enq_len; /**< Number of packets pending enqueue */
	uint32_t txq_pos; /**< Current TX queue position */
	uint32_t flush_count; /**< Iterations since the last flush */
};

/** PMD internals */
struct pmd_internals {
	/** Device parameters */
	struct pmd_params params;

	/** Soft device state */
	struct {
		struct default_internals def; /**< Default (non-TM) path */
		struct tm_internals tm; /**< Traffic Management */
	} soft;

	/** Hard device */
	struct {
		uint16_t port_id; /**< Port ID of the existing hard device */
	} hard;
};

/** Per-RX-queue context of the soft device */
struct pmd_rx_queue {
	/** Hard device RX queue backing this soft RX queue */
	struct {
		uint16_t port_id;
		uint16_t rx_queue_id;
	} hard;
};

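Since each soft RX queue only records which hard device queue it shadows, an RX burst on the soft port can plausibly be serviced by polling that hard queue directly. The function below is a rough sketch of such a pass-through, not the driver's actual RX routine.

/* Rough sketch: serve a soft-device RX burst from the backing hard
 * device queue recorded in the queue context. */
static uint16_t
soft_rx_burst(void *rxq, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct pmd_rx_queue *rx_queue = rxq;

	return rte_eth_rx_burst(rx_queue->hard.port_id,
		rx_queue->hard.rx_queue_id, pkts, nb_pkts);
}
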
/** Traffic Management (TM) operations exported through the rte_tm API */
extern const struct rte_tm_ops pmd_tm_ops;

int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);

int
tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

void
tm_free(struct pmd_internals *p);

int
tm_start(struct pmd_internals *p);

void
tm_stop(struct pmd_internals *p);

/** Return non-zero if the TM feature was requested for this device. */
static inline int
tm_enabled(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM);
}

/** Return non-zero if the TM feature is enabled and a TM hierarchy
 * (at least the port-level node) has been set up.
 */
static inline int
tm_used(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM) &&
		p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}

#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
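
A likely use of the two helpers above is path selection when the device starts: tm_enabled() reports whether TM was requested at all, while tm_used() additionally checks that a hierarchy has been committed. The caller below is a hedged sketch; soft_dev_start() is a hypothetical function, not part of this header.

/* Hypothetical caller: pick the packet path at device start time. */
static int
soft_dev_start(struct rte_eth_dev *dev)
{
	if (tm_used(dev))
		/* TM hierarchy committed: bring up the scheduler path. */
		return tm_start(dev->data->dev_private);

	/* TM disabled, or enabled but not configured: stay on the
	 * default (pass-through) path. */
	return 0;
}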