5 #ifndef _RTE_GRAPH_WORKER_H_ 6 #define _RTE_GRAPH_WORKER_H_ 61 uint32_t realloc_count;
67 #define RTE_NODE_CTX_SZ 16 72 uint64_t total_cycles;
/**
 * @internal
 *
 * Allocate a stream of objects for the given node.
 *
 * @param graph
 *   Pointer to the graph object.
 * @param node
 *   Pointer to the node whose object stream is (re)allocated.
 */
void __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node);
/**
 * @internal
 *
 * Allocate a stream of objects for the given node, with an explicit
 * minimum capacity request.
 *
 * @param graph
 *   Pointer to the graph object.
 * @param node
 *   Pointer to the node whose object stream is (re)allocated.
 * @param req_size
 *   Number of object slots requested.
 */
void __rte_node_stream_alloc_size(struct rte_graph *graph,
				  struct rte_node *node, uint16_t req_size);
136 uint32_t head = graph->head;
137 struct rte_node *node;
157 while (
likely(head != graph->tail)) {
158 node = (
struct rte_node *)
RTE_PTR_ADD(graph, cir_start[(int32_t)head++]);
165 rc = node->process(graph, node, objs, node->idx);
166 node->total_cycles += rte_rdtsc() - start;
168 node->total_objs += rc;
170 node->process(graph, node, objs, node->idx);
173 head =
likely((int32_t)head > 0) ? head & mask : head;
191 __rte_node_enqueue_tail_update(
struct rte_graph *graph,
struct rte_node *node)
196 graph->cir_start[tail++] = node->off;
197 graph->tail = tail & graph->cir_mask;
218 __rte_node_enqueue_prologue(
struct rte_graph *graph,
struct rte_node *node,
219 const uint16_t idx,
const uint16_t space)
224 __rte_node_enqueue_tail_update(graph, node);
226 if (
unlikely(node->size < (idx + space)))
227 __rte_node_stream_alloc_size(graph, node, node->size + space);
244 __rte_node_next_node_get(
struct rte_node *node,
rte_edge_t next)
246 RTE_ASSERT(next < node->nb_edges);
248 node = node->nodes[next];
272 rte_edge_t next,
void **objs, uint16_t nb_objs)
274 node = __rte_node_next_node_get(node, next);
275 const uint16_t idx = node->idx;
277 __rte_node_enqueue_prologue(graph, node, idx, nb_objs);
279 rte_memcpy(&node->objs[idx], objs, nb_objs *
sizeof(
void *));
280 node->idx = idx + nb_objs;
301 node = __rte_node_next_node_get(node, next);
302 uint16_t idx = node->idx;
304 __rte_node_enqueue_prologue(graph, node, idx, 1);
306 node->objs[idx++] = obj;
331 node = __rte_node_next_node_get(node, next);
332 uint16_t idx = node->idx;
334 __rte_node_enqueue_prologue(graph, node, idx, 2);
336 node->objs[idx++] = obj0;
337 node->objs[idx++] = obj1;
364 rte_edge_t next,
void *obj0,
void *obj1,
void *obj2,
367 node = __rte_node_next_node_get(node, next);
368 uint16_t idx = node->idx;
370 __rte_node_enqueue_prologue(graph, node, idx, 4);
372 node->objs[idx++] = obj0;
373 node->objs[idx++] = obj1;
374 node->objs[idx++] = obj2;
375 node->objs[idx++] = obj3;
398 rte_edge_t *nexts,
void **objs, uint16_t nb_objs)
402 for (i = 0; i < nb_objs; i++)
426 static inline void **
430 node = __rte_node_next_node_get(node, next);
431 const uint16_t idx = node->idx;
432 uint16_t free_space = node->size - idx;
435 __rte_node_stream_alloc_size(graph, node, node->size + nb_objs);
437 return &node->objs[idx];
464 node = __rte_node_next_node_get(node, next);
466 __rte_node_enqueue_tail_update(graph, node);
490 struct rte_node *dst = __rte_node_next_node_get(src, next);
493 if (
likely(dst->idx == 0)) {
494 void **dobjs = dst->objs;
495 uint16_t dsz = dst->size;
496 dst->objs = src->objs;
497 dst->size = src->size;
501 __rte_node_enqueue_tail_update(graph, dst);
static __rte_experimental void rte_node_enqueue(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void **objs, uint16_t nb_objs)
#define __rte_always_inline
static __rte_experimental void rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src, rte_edge_t next)
#define __rte_cache_min_aligned
static __rte_experimental void rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1)
#define RTE_NODE_NAMESIZE
static __rte_always_inline int rte_graph_has_stats_feature(void)
uint16_t(* rte_node_process_t)(struct rte_graph *graph, struct rte_node *node, void **objs, uint16_t nb_objs)
#define RTE_GRAPH_NAMESIZE
static __rte_experimental void rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t idx)
#define RTE_PTR_ADD(ptr, x)
static __rte_experimental void rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node, rte_edge_t *nexts, void **objs, uint16_t nb_objs)
static __rte_experimental void rte_graph_walk(struct rte_graph *graph)
static __rte_experimental void rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj)
#define __rte_cache_aligned
static __rte_experimental void ** rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t nb_objs)
static void * rte_memcpy(void *dst, const void *src, size_t n)
static __rte_experimental void rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1, void *obj2, void *obj3)
static void rte_prefetch0(const volatile void *p)