5#ifndef _RTE_GRAPH_WORKER_COMMON_H_
6#define _RTE_GRAPH_WORKER_COMMON_H_
/** Graph worker model: run-to-completion — the walking lcore invokes each
 *  node's process callback inline. */
#define RTE_GRAPH_MODEL_RTC 0
/** Graph worker model: multi-core dispatch — presumably streams are handed
 *  off between lcores; confirm semantics against the full header. */
#define RTE_GRAPH_MODEL_MCORE_DISPATCH 1
/** Model used when the application does not select one explicitly. */
#define RTE_GRAPH_MODEL_DEFAULT RTE_GRAPH_MODEL_RTC

/* Singly-linked run-queue head of graphs (sys/queue.h SLIST; struct rte_graph
 * may be an incomplete type at this point — only a pointer is stored). */
SLIST_HEAD(rte_graph_rq_head, rte_graph);
/* NOTE(review): incomplete extract of the worker-side structure definitions.
 * The enclosing "struct ... {" openers, many members and the closing braces
 * are not visible in this chunk, and the stray leading integers are source
 * line numbers fused in by the extraction — they are not C tokens.
 * Do not edit this span in place; work from the full header. */
64 alignas(RTE_CACHE_LINE_SIZE)
	struct rte_graph_rq_head *rq;
66 struct rte_graph_rq_head rq_head;
68 unsigned int lcore_id;
75 SLIST_ENTRY(rte_graph) next;
82 uint64_t nb_pkt_captured;
84 uint64_t nb_pkt_to_capture;
101 uint32_t realloc_count;
/* NOTE(review): the members below (a second lcore_id, scheduling totals and
 * a back pointer to the owning graph) appear to belong to a different,
 * dispatch-model structure than the members above — confirm the struct
 * boundaries against the full header. */
112 unsigned int lcore_id;
113 uint64_t total_sched_objs;
114 uint64_t total_sched_fail;
115 struct rte_graph *graph;
/* NOTE(review): fragment of the per-node context area. RTE_NODE_CTX_SZ fixes
 * the scratch context at 16 bytes; ctx[] is that scratch area and the
 * __extension__ anonymous struct that follows is only partially visible
 * (its members, e.g. process_u64, and the closing brace are cut off). */
125#define RTE_NODE_CTX_SZ 16
127 uint8_t ctx[RTE_NODE_CTX_SZ];
128 __extension__
	struct {
145 uint64_t process_u64;
/* NOTE(review): compile-time layout check relating offsetof(struct rte_node,
 * nodes) and offsetof(struct rte_node, ctx) — presumably asserting the size
 * of the context region between the two members. The comparison operand,
 * message and closing parenthesis are missing from this extract. */
151static_assert(
	offsetof(
		struct rte_node, nodes) -
	offsetof(
		struct rte_node, ctx)
/**
 * @internal
 *
 * Allocate an object stream for a node. Implementation lives in the
 * rte_graph library, not in this header.
 *
 * @param graph
 *   Graph the node belongs to.
 * @param node
 *   Node whose object stream is allocated.
 */
void __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node);
/**
 * @internal
 *
 * Allocate an object stream for a node with an explicitly requested size.
 * Implementation lives in the rte_graph library, not in this header.
 *
 * @param graph
 *   Graph the node belongs to.
 * @param node
 *   Node whose object stream is allocated.
 * @param req_size
 *   Requested stream size, in objects.
 */
void __rte_node_stream_alloc_size(struct rte_graph *graph,
				  struct rte_node *node, uint16_t req_size);
/* NOTE(review): fragment of the per-node invocation helper. Visible logic:
 * one path calls node->process and accumulates cycle count (rte_rdtsc()
 * delta) and object totals into the node's stats; the other calls
 * node->process without accounting. The selecting branch, the return type,
 * and the declarations of rc/start/objs are missing from this extract. */
198__rte_node_process(
	struct rte_graph *graph,
	struct rte_node *node)
210	rc = node->process(graph, node, objs, node->idx);
211	node->total_cycles += rte_rdtsc() - start;
213	node->total_objs += rc;
215	node->process(graph, node, objs, node->idx);
/* NOTE(review): appends the node's offset to the graph's circular work array
 * and advances the tail index, wrapping with cir_mask (mask implies the ring
 * size is a power of two). The declaration/load of the local `tail` and the
 * function braces are missing from this extract. */
231__rte_node_enqueue_tail_update(
	struct rte_graph *graph,
	struct rte_node *node)
236	graph->cir_start[tail++] = node->off;
237	graph->tail = tail & graph->cir_mask;
/* NOTE(review): common prologue for all enqueue variants — puts the node on
 * the pending-work ring and grows its stream when the pending index plus the
 * incoming space would exceed node->size. In the upstream header the tail
 * update is guarded by `if (idx == 0)` (only a newly activated node is
 * added); that guard is not visible in this extract — confirm before use. */
258__rte_node_enqueue_prologue(
	struct rte_graph *graph,
	struct rte_node *node,
259	const uint16_t idx,
	const uint16_t space)
264	__rte_node_enqueue_tail_update(graph, node);
266	if (
		unlikely(node->size < (idx + space)))
267		__rte_node_stream_alloc_size(graph, node, node->size + space);
/* NOTE(review): resolves an edge id to the destination node pointer via the
 * node's adjacency array, with a debug-only bounds assert on the edge index.
 * The `return node;` statement and surrounding fence asserts present in the
 * upstream header are missing from this extract. */
284__rte_node_next_node_get(
	struct rte_node *node,
	rte_edge_t next)
286	RTE_ASSERT(next < node->nb_edges);
288	node = node->nodes[next];
/* NOTE(review): body fragment of rte_node_enqueue — bulk enqueue of nb_objs
 * objects to the node reached over edge `next`: resolve destination, run the
 * enqueue prologue (ring registration / stream growth), bulk-copy the object
 * pointers with rte_memcpy, then publish the new pending index. The function
 * signature's opening line and the closing brace are missing here. */
311	rte_edge_t next,
	void **objs, uint16_t nb_objs)
313	node = __rte_node_next_node_get(node, next);
314	const uint16_t idx = node->idx;
316	__rte_node_enqueue_prologue(graph, node, idx, nb_objs);
318	rte_memcpy(&node->objs[idx], objs, nb_objs *
		sizeof(
			void *));
319	node->idx = idx + nb_objs;
/* NOTE(review): body fragment of rte_node_enqueue_x1 — enqueue exactly one
 * object to the node across edge `next`. The trailing `node->idx = idx;`
 * store and the function braces are missing from this extract. */
339	node = __rte_node_next_node_get(node, next);
340	uint16_t idx = node->idx;
342	__rte_node_enqueue_prologue(graph, node, idx, 1);
344	node->objs[idx++] = obj;
/* NOTE(review): body fragment of rte_node_enqueue_x2 — enqueue exactly two
 * objects to the node across edge `next`. The trailing `node->idx = idx;`
 * store and the function braces are missing from this extract. */
368	node = __rte_node_next_node_get(node, next);
369	uint16_t idx = node->idx;
371	__rte_node_enqueue_prologue(graph, node, idx, 2);
373	node->objs[idx++] = obj0;
374	node->objs[idx++] = obj1;
/* NOTE(review): fragment of rte_node_enqueue_x4 — enqueue exactly four
 * objects to the node across edge `next`. The parameter line declaring
 * `void *obj3` (used below), the trailing `node->idx = idx;` store and the
 * function braces are missing from this extract. */
400	rte_edge_t next,
	void *obj0,
	void *obj1,
	void *obj2,
403	node = __rte_node_next_node_get(node, next);
404	uint16_t idx = node->idx;
406	__rte_node_enqueue_prologue(graph, node, idx, 4);
408	node->objs[idx++] = obj0;
409	node->objs[idx++] = obj1;
410	node->objs[idx++] = obj2;
411	node->objs[idx++] = obj3;
/* NOTE(review): fragment of rte_node_enqueue_next — scatter nb_objs objects,
 * each to the edge given by the parallel nexts[] array. Only the parameter
 * tail and the loop header survive in this extract; the loop body (per-object
 * x1 enqueue in upstream) is missing. */
433	rte_edge_t *nexts,
	void **objs, uint16_t nb_objs)
437	for (i = 0; i < nb_objs; i++)
/* NOTE(review): body fragment of rte_node_next_stream_get — returns a write
 * pointer into the destination node's object stream, growing the stream when
 * free space is insufficient. The condition that compares free_space against
 * nb_objs before the realloc call, and the function braces, are missing from
 * this extract. */
464	node = __rte_node_next_node_get(node, next);
465	const uint16_t idx = node->idx;
466	uint16_t free_space = node->size - idx;
469	__rte_node_stream_alloc_size(graph, node, node->size + nb_objs);
471	return &node->objs[idx];
/* NOTE(review): body fragment of rte_node_next_stream_put — commits objects
 * previously written via rte_node_next_stream_get by registering the node on
 * the work ring. The idx bookkeeping and the guard present in the upstream
 * header are missing from this extract. */
497	node = __rte_node_next_node_get(node, next);
499	__rte_node_enqueue_tail_update(graph, node);
/* NOTE(review): body fragment of rte_node_next_stream_move — fast path that
 * hands src's whole stream to an idle destination (dst->idx == 0) by swapping
 * the objs/size fields instead of copying, then registers dst on the work
 * ring. The lines completing the swap back into src, the else (copy) branch,
 * and the closing braces are missing from this extract. */
522	struct rte_node *dst = __rte_node_next_node_get(src, next);
525	if (
		likely(dst->idx == 0)) {
526		void **dobjs = dst->objs;
527		uint16_t dsz = dst->size;
528		dst->objs = src->objs;
529		dst->size = src->size;
533	__rte_node_enqueue_tail_update(graph, dst);
/* NOTE(review): body fragment of rte_node_xstat_increment — adds `value` to
 * the node's extended-stat counter xstat_id, locating the counter array at
 * byte offset node->xstat_off from the node base via RTE_PTR_ADD. No bounds
 * check on xstat_id is visible here; the signature and braces are missing
 * from this extract. */
612	uint64_t *xstat = (uint64_t *)
		RTE_PTR_ADD(node, node->xstat_off);
613	xstat[xstat_id] += value;
#define RTE_CACHE_LINE_MIN_SIZE
#define offsetof(TYPE, MEMBER)
#define RTE_PTR_ADD(ptr, x)
#define __rte_cache_aligned
#define __rte_always_inline
uint16_t(* rte_node_process_t)(struct rte_graph *graph, struct rte_node *node, void **objs, uint16_t nb_objs)
void(* packets_enqueued_cb)(struct rte_graph *graph, uint64_t cb_priv)
#define RTE_NODE_NAMESIZE
#define RTE_GRAPH_PCAP_FILE_SZ
#define RTE_GRAPH_NAMESIZE
static __rte_always_inline int rte_graph_has_stats_feature(void)
static __rte_always_inline uint8_t rte_graph_worker_model_no_check_get(struct rte_graph *graph)
static void rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1, void *obj2, void *obj3)
bool rte_graph_model_is_valid(uint8_t model)
static void rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t idx)
static void rte_node_enqueue(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void **objs, uint16_t nb_objs)
static void rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src, rte_edge_t next)
int rte_graph_worker_model_set(uint8_t model)
static void rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node, rte_edge_t *nexts, void **objs, uint16_t nb_objs)
static void rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj)
static __rte_experimental void rte_node_xstat_increment(struct rte_node *node, uint16_t xstat_id, uint64_t value)
static void ** rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t nb_objs)
static void rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1)
uint8_t rte_graph_worker_model_get(struct rte_graph *graph)
static void * rte_memcpy(void *dst, const void *src, size_t n)
static void rte_prefetch0(const volatile void *p)
char name[RTE_MEMPOOL_NAMESIZE]
rte_node_process_t process