5#ifndef __INCLUDE_RTE_SCHED_H__
6#define __INCLUDE_RTE_SCHED_H__
/* Total number of queues per pipe. */
#define RTE_SCHED_QUEUES_PER_PIPE 16

/* Number of best-effort (BE) queues per pipe; the BE queues share the
 * last traffic class.
 */
#define RTE_SCHED_BE_QUEUES_PER_PIPE 4

/* Number of traffic classes per pipe: one TC per non-BE queue plus a
 * single BE traffic class covering all BE queues
 * (16 - 4 + 1 = 13 with the defaults above).
 */
#define RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE \
	(RTE_SCHED_QUEUES_PER_PIPE - RTE_SCHED_BE_QUEUES_PER_PIPE + 1)

/* Index of the best-effort traffic class (always the last one). */
#define RTE_SCHED_TRAFFIC_CLASS_BE (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)

/* Per-packet framing overhead, in bytes, charged on top of the packet
 * length (presumably Ethernet preamble/SFD/FCS/IFG — TODO confirm).
 * Overridable at build time, hence the guard; the closing #endif was
 * lost in extraction and is restored here.
 */
#ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
#define RTE_SCHED_FRAME_OVERHEAD_DEFAULT 24
#endif
140struct rte_sched_pipe_params {
154 uint8_t tc_ov_weight;
163struct rte_sched_cman_params {
185struct rte_sched_subport_params {
192 uint32_t n_pipes_per_subport_enabled;
203 struct rte_sched_pipe_params *pipe_profiles;
206 uint32_t n_pipe_profiles;
209 uint32_t n_max_pipe_profiles;
215 struct rte_sched_cman_params *cman_params;
218struct rte_sched_subport_profile_params {
321struct rte_sched_port *
351 struct rte_sched_pipe_params *params,
352 uint32_t *pipe_profile_id);
370 struct rte_sched_subport_profile_params *profile,
371 uint32_t *subport_profile_id);
394 struct rte_sched_subport_params *params,
395 uint32_t subport_profile_id);
415 int32_t pipe_profile);
429 struct rte_sched_subport_params **subport_params);
503 uint32_t subport, uint32_t pipe, uint32_t traffic_class,
529 uint32_t *subport, uint32_t *pipe,
530 uint32_t *traffic_class, uint32_t *queue);
533rte_sched_port_pkt_read_color(
const struct rte_mbuf *pkt);
/*
 * Public API prototypes, reconstructed from an extraction-mangled symbol
 * index: the original lines lacked terminating semicolons, and two
 * value-less re-#defines of RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE and
 * RTE_SCHED_BE_QUEUES_PER_PIPE were dropped — both macros already have
 * valued definitions earlier in this header, so an empty redefinition
 * is a constraint violation (C11 6.10.3p2).
 *
 * NOTE(review): int-returning functions presumably follow the DPDK
 * 0-on-success / negative-on-error convention — confirm per function
 * against the implementation.
 */

/* ---- Port lifecycle -------------------------------------------------- */

/**
 * Create and configure a scheduler port object.
 *
 * @param params port configuration parameters
 * @return handle to the new port on success
 *   (NOTE(review): NULL on error inferred from the pointer return —
 *   confirm against the implementation)
 */
struct rte_sched_port *
rte_sched_port_config(struct rte_sched_port_params *params);

/**
 * Free a scheduler port object.
 *
 * @param port port handle
 */
void
rte_sched_port_free(struct rte_sched_port *port);

/**
 * Memory footprint, in bytes, required by a port built from the given
 * port and subport configuration.
 */
uint32_t
rte_sched_port_get_memory_footprint(struct rte_sched_port_params *port_params,
	struct rte_sched_subport_params **subport_params);

/* ---- Subport / pipe configuration ------------------------------------ */

/** Add a subport bandwidth profile; its id is returned via
 * @p subport_profile_id.
 */
int
rte_sched_port_subport_profile_add(struct rte_sched_port *port,
	struct rte_sched_subport_profile_params *profile,
	uint32_t *subport_profile_id);

/** Configure subport @p subport_id with @p params, using the subport
 * profile identified by @p subport_profile_id.
 */
int
rte_sched_subport_config(struct rte_sched_port *port, uint32_t subport_id,
	struct rte_sched_subport_params *params, uint32_t subport_profile_id);

/** Add a pipe profile to subport @p subport_id; its id is returned via
 * @p pipe_profile_id.
 */
int
rte_sched_subport_pipe_profile_add(struct rte_sched_port *port,
	uint32_t subport_id, struct rte_sched_pipe_params *params,
	uint32_t *pipe_profile_id);

/** Bind pipe @p pipe_id of subport @p subport_id to pipe profile
 * @p pipe_profile.
 */
int
rte_sched_pipe_config(struct rte_sched_port *port, uint32_t subport_id,
	uint32_t pipe_id, int32_t pipe_profile);

/** Enable or disable traffic-class oversubscription on a subport. */
int
rte_sched_subport_tc_ov_config(struct rte_sched_port *port,
	uint32_t subport_id, bool tc_ov_enable);

/* ---- Run-time: enqueue / dequeue ------------------------------------- */

/** Submit a burst of @p n_pkts packets to the scheduler. */
int
rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts,
	uint32_t n_pkts);

/** Retrieve up to @p n_pkts scheduled packets into @p pkts. */
int
rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts,
	uint32_t n_pkts);

/* ---- Statistics ------------------------------------------------------- */

/** Read (and presumably reset — TODO confirm) subport statistics. */
int
rte_sched_subport_read_stats(struct rte_sched_port *port, uint32_t subport_id,
	struct rte_sched_subport_stats *stats, uint32_t *tc_ov);

/** Read queue statistics; current queue length is returned via @p qlen. */
int
rte_sched_queue_read_stats(struct rte_sched_port *port, uint32_t queue_id,
	struct rte_sched_queue_stats *stats, uint16_t *qlen);

/* ---- Packet metadata (mbuf scheduler field) --------------------------- */

/** Write the scheduler tree path (subport/pipe/TC/queue) and color into
 * the packet's mbuf metadata.
 */
void
rte_sched_port_pkt_write(struct rte_sched_port *port, struct rte_mbuf *pkt,
	uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue,
	enum rte_color color);

/** Read the scheduler tree path previously written into the mbuf. */
void
rte_sched_port_pkt_read_tree_path(struct rte_sched_port *port,
	const struct rte_mbuf *pkt, uint32_t *subport, uint32_t *pipe,
	uint32_t *traffic_class, uint32_t *queue);
uint32_t n_subport_profiles
struct rte_sched_subport_profile_params * subport_profiles
uint32_t n_subports_per_port
uint32_t n_pipes_per_subport
uint32_t n_max_subport_profiles
uint64_t n_pkts_cman_dropped
uint64_t n_bytes_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_bytes_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_cman_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
uint64_t n_pkts_tc_dropped[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]