DPDK  24.03.0
rte_graph_worker_common.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4 
5 #ifndef _RTE_GRAPH_WORKER_COMMON_H_
6 #define _RTE_GRAPH_WORKER_COMMON_H_
7 
15 #include <stdalign.h>
16 
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_prefetch.h>
20 #include <rte_memcpy.h>
21 #include <rte_memory.h>
22 
23 #include "rte_graph.h"
24 
25 #ifdef __cplusplus
26 extern "C" {
27 #endif
28 
/* Graph execution models.
 * When adding a new graph model entry, update rte_graph_model_is_valid()
 * implementation.
 */
#define RTE_GRAPH_MODEL_RTC 0            /**< Run-To-Completion model. */
#define RTE_GRAPH_MODEL_MCORE_DISPATCH 1 /**< Multi-core dispatch model. */

/** Default graph model. */
#define RTE_GRAPH_MODEL_DEFAULT RTE_GRAPH_MODEL_RTC

/** Head of the singly-linked run-queue list of graphs (<sys/queue.h> SLIST). */
SLIST_HEAD(rte_graph_rq_head, rte_graph);
/**
 * @internal
 *
 * Graph object: the fast-path fields (circular walk buffer) come first,
 * slow-path bookkeeping follows. Cache-line aligned.
 */
struct __rte_cache_aligned rte_graph {
	/* Fast path area. */
	uint32_t tail;		     /**< Tail of circular buffer. */
	uint32_t head;		     /**< Head of circular buffer. */
	uint32_t cir_mask;	     /**< Circular buffer wrap-around mask (power-of-two size). */
	rte_node_t nb_nodes;	     /**< Number of nodes in the graph. */
	rte_graph_off_t *cir_start;  /**< Pointer to circular buffer of node offsets. */
	rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
	uint8_t model;		     /**< Graph model (one of RTE_GRAPH_MODEL_*). */
	uint8_t reserved1;	     /**< Reserved for future use. */
	uint16_t reserved2;	     /**< Reserved for future use. */
	union {
		/* Fast schedule area for mcore dispatch model */
		struct {
			alignas(RTE_CACHE_LINE_SIZE) struct rte_graph_rq_head *rq;
			/* The run-queue */
			struct rte_graph_rq_head rq_head; /* The head for run-queue list */

			unsigned int lcore_id;  /**< Lcore running this graph. */
			struct rte_ring *wq;    /**< Work queue ring. */
			struct rte_mempool *mp; /**< Mempool backing the dispatch model. */
		} dispatch; /**< Only used by the mcore dispatch model. */
	};
	SLIST_ENTRY(rte_graph) next; /* The next for rte_graph list */
	/* End of Fast path area.*/
	rte_graph_t id;	/**< Graph identifier. */
	int socket;	/**< NUMA socket for this graph's memory. */
	char name[RTE_GRAPH_NAMESIZE];	/**< Name of the graph. */
	bool pcap_enable;	/**< Pcap packet capture enabled for this graph. */
	/** Number of packets captured so far. */
	uint64_t nb_pkt_captured;
	/** Maximum number of packets to capture. */
	uint64_t nb_pkt_to_capture;
	char pcap_filename[RTE_GRAPH_PCAP_FILE_SZ]; /**< Pcap capture file name. */
	uint64_t fence; /**< Fence pattern used for sanity checks (RTE_GRAPH_FENCE). */
};
84 
/**
 * @internal
 *
 * Node object: slow-path bookkeeping first, then the fast-path stream area
 * and the trailing flexible array of adjacent nodes. Cache-line aligned.
 */
struct __rte_cache_aligned rte_node {
	/* Slow path area */
	uint64_t fence;		/**< Fence pattern (RTE_GRAPH_FENCE) for sanity checks. */
	rte_graph_off_t next;	/**< Offset of the next node in the graph reel. */
	rte_node_t id;		/**< Node identifier. */
	rte_node_t parent_id;	/**< Parent node identifier. */
	rte_edge_t nb_edges;	/**< Number of edges out of this node. */
	uint32_t realloc_count;	/**< Number of times the stream was reallocated. */

	char parent[RTE_NODE_NAMESIZE];	/**< Parent node name. */
	char name[RTE_NODE_NAMESIZE];	/**< Name of this node. */

	/** Original process function, saved when capture wraps it. */
	rte_node_process_t original_process;

	union {
		/* Fast schedule area for mcore dispatch model */
		struct {
			unsigned int lcore_id;     /**< Lcore this node is bound to. */
			uint64_t total_sched_objs; /**< Count of successfully scheduled objects. */
			uint64_t total_sched_fail; /**< Count of scheduling failures. */
		} dispatch;
	};
	/* Fast path area */
#define RTE_NODE_CTX_SZ 16
	alignas(RTE_CACHE_LINE_SIZE) uint8_t ctx[RTE_NODE_CTX_SZ]; /**< Per-node scratch context. */
	uint16_t size;		/**< Capacity of the objs stream. */
	uint16_t idx;		/**< Number of objects currently pending in the stream. */
	rte_graph_off_t off;	/**< Offset of this node within the graph reel. */
	uint64_t total_cycles;	/**< Cycles spent in this node (stats feature). */
	uint64_t total_calls;	/**< Number of process() invocations (stats feature). */
	uint64_t total_objs;	/**< Number of objects processed (stats feature). */
	union {
		void **objs;	   /**< Stream of object pointers. */
		uint64_t objs_u64; /**< Same pointer as u64 for raw access. */
	};
	union {
		rte_node_process_t process; /**< Node process callback. */
		uint64_t process_u64;	    /**< Same pointer as u64 for raw access. */
	};
	alignas(RTE_CACHE_LINE_MIN_SIZE) struct rte_node *nodes[]; /**< Adjacent (next) nodes, nb_edges entries. */
};
132 
/**
 * @internal
 *
 * Allocate (or grow) the stream of objects for the given node.
 *
 * @param graph
 *   Pointer to the graph object.
 * @param node
 *   Pointer to the node whose stream is to be allocated.
 */
void __rte_node_stream_alloc(struct rte_graph *graph, struct rte_node *node);

/**
 * @internal
 *
 * Allocate (or grow) the node's stream so it can hold at least
 * @p req_size objects.
 *
 * @param graph
 *   Pointer to the graph object.
 * @param node
 *   Pointer to the node whose stream is to be allocated.
 * @param req_size
 *   Requested number of objects.
 */
void __rte_node_stream_alloc_size(struct rte_graph *graph,
				  struct rte_node *node, uint16_t req_size);
163 
164 /* Fast path helper functions */
165 
176 static __rte_always_inline void
177 __rte_node_process(struct rte_graph *graph, struct rte_node *node)
178 {
179  uint64_t start;
180  uint16_t rc;
181  void **objs;
182 
183  RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
184  objs = node->objs;
185  rte_prefetch0(objs);
186 
188  start = rte_rdtsc();
189  rc = node->process(graph, node, objs, node->idx);
190  node->total_cycles += rte_rdtsc() - start;
191  node->total_calls++;
192  node->total_objs += rc;
193  } else {
194  node->process(graph, node, objs, node->idx);
195  }
196  node->idx = 0;
197 }
198 
209 static __rte_always_inline void
210 __rte_node_enqueue_tail_update(struct rte_graph *graph, struct rte_node *node)
211 {
212  uint32_t tail;
213 
214  tail = graph->tail;
215  graph->cir_start[tail++] = node->off;
216  graph->tail = tail & graph->cir_mask;
217 }
218 
236 static __rte_always_inline void
237 __rte_node_enqueue_prologue(struct rte_graph *graph, struct rte_node *node,
238  const uint16_t idx, const uint16_t space)
239 {
240 
241  /* Add to the pending stream list if the node is new */
242  if (idx == 0)
243  __rte_node_enqueue_tail_update(graph, node);
244 
245  if (unlikely(node->size < (idx + space)))
246  __rte_node_stream_alloc_size(graph, node, node->size + space);
247 }
248 
262 static __rte_always_inline struct rte_node *
263 __rte_node_next_node_get(struct rte_node *node, rte_edge_t next)
264 {
265  RTE_ASSERT(next < node->nb_edges);
266  RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
267  node = node->nodes[next];
268  RTE_ASSERT(node->fence == RTE_GRAPH_FENCE);
269 
270  return node;
271 }
272 
288 static inline void
289 rte_node_enqueue(struct rte_graph *graph, struct rte_node *node,
290  rte_edge_t next, void **objs, uint16_t nb_objs)
291 {
292  node = __rte_node_next_node_get(node, next);
293  const uint16_t idx = node->idx;
294 
295  __rte_node_enqueue_prologue(graph, node, idx, nb_objs);
296 
297  rte_memcpy(&node->objs[idx], objs, nb_objs * sizeof(void *));
298  node->idx = idx + nb_objs;
299 }
300 
314 static inline void
315 rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node,
316  rte_edge_t next, void *obj)
317 {
318  node = __rte_node_next_node_get(node, next);
319  uint16_t idx = node->idx;
320 
321  __rte_node_enqueue_prologue(graph, node, idx, 1);
322 
323  node->objs[idx++] = obj;
324  node->idx = idx;
325 }
326 
343 static inline void
344 rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node,
345  rte_edge_t next, void *obj0, void *obj1)
346 {
347  node = __rte_node_next_node_get(node, next);
348  uint16_t idx = node->idx;
349 
350  __rte_node_enqueue_prologue(graph, node, idx, 2);
351 
352  node->objs[idx++] = obj0;
353  node->objs[idx++] = obj1;
354  node->idx = idx;
355 }
356 
377 static inline void
378 rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node,
379  rte_edge_t next, void *obj0, void *obj1, void *obj2,
380  void *obj3)
381 {
382  node = __rte_node_next_node_get(node, next);
383  uint16_t idx = node->idx;
384 
385  __rte_node_enqueue_prologue(graph, node, idx, 4);
386 
387  node->objs[idx++] = obj0;
388  node->objs[idx++] = obj1;
389  node->objs[idx++] = obj2;
390  node->objs[idx++] = obj3;
391  node->idx = idx;
392 }
393 
410 static inline void
411 rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node,
412  rte_edge_t *nexts, void **objs, uint16_t nb_objs)
413 {
414  uint16_t i;
415 
416  for (i = 0; i < nb_objs; i++)
417  rte_node_enqueue_x1(graph, node, nexts[i], objs[i]);
418 }
419 
439 static inline void **
440 rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node,
441  rte_edge_t next, uint16_t nb_objs)
442 {
443  node = __rte_node_next_node_get(node, next);
444  const uint16_t idx = node->idx;
445  uint16_t free_space = node->size - idx;
446 
447  if (unlikely(free_space < nb_objs))
448  __rte_node_stream_alloc_size(graph, node, node->size + nb_objs);
449 
450  return &node->objs[idx];
451 }
452 
469 static inline void
470 rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node,
471  rte_edge_t next, uint16_t idx)
472 {
473  if (unlikely(!idx))
474  return;
475 
476  node = __rte_node_next_node_get(node, next);
477  if (node->idx == 0)
478  __rte_node_enqueue_tail_update(graph, node);
479 
480  node->idx += idx;
481 }
482 
497 static inline void
498 rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src,
499  rte_edge_t next)
500 {
501  struct rte_node *dst = __rte_node_next_node_get(src, next);
502 
503  /* Let swap the pointers if dst don't have valid objs */
504  if (likely(dst->idx == 0)) {
505  void **dobjs = dst->objs;
506  uint16_t dsz = dst->size;
507  dst->objs = src->objs;
508  dst->size = src->size;
509  src->objs = dobjs;
510  src->size = dsz;
511  dst->idx = src->idx;
512  __rte_node_enqueue_tail_update(graph, dst);
513  } else { /* Move the objects from src node to dst node */
514  rte_node_enqueue(graph, src, next, src->objs, src->idx);
515  }
516 }
517 
/**
 * Check whether @p model is a valid graph model value
 * (one of the RTE_GRAPH_MODEL_* constants).
 *
 * @param model
 *   Model value to validate.
 * @return
 *   true if the model is valid, false otherwise.
 */
bool
rte_graph_model_is_valid(uint8_t model);

/**
 * Set the graph worker model.
 *
 * @param model
 *   Model value (RTE_GRAPH_MODEL_*).
 * @return
 *   0 on success; non-zero on failure (NOTE(review): exact error
 *   convention defined by the implementation — confirm there).
 */
int rte_graph_worker_model_set(uint8_t model);

/**
 * Get the worker model of the given graph.
 *
 * @param graph
 *   Pointer to the graph object.
 * @return
 *   Graph model value (RTE_GRAPH_MODEL_*).
 */
uint8_t rte_graph_worker_model_get(struct rte_graph *graph);
555 
/**
 * Fast-path accessor: read the graph's model field directly, without the
 * validation done by rte_graph_worker_model_get().
 *
 * @param graph
 *   Pointer to the graph object.
 * @return
 *   Graph model value (RTE_GRAPH_MODEL_*).
 */
static __rte_always_inline
uint8_t rte_graph_worker_model_no_check_get(struct rte_graph *graph)
{
	return graph->model;
}
573 
574 #ifdef __cplusplus
575 }
576 #endif
577 
#endif /* _RTE_GRAPH_WORKER_COMMON_H_ */
uint32_t rte_node_t
Definition: rte_graph.h:40
#define __rte_always_inline
Definition: rte_common.h:355
bool rte_graph_model_is_valid(uint8_t model)
uint16_t rte_edge_t
Definition: rte_graph.h:41
static void rte_node_enqueue(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void **objs, uint16_t nb_objs)
#define likely(x)
int rte_graph_worker_model_set(uint8_t model)
#define RTE_NODE_NAMESIZE
Definition: rte_graph.h:31
#define RTE_GRAPH_PCAP_FILE_SZ
Definition: rte_graph.h:32
static __rte_always_inline int rte_graph_has_stats_feature(void)
Definition: rte_graph.h:686
static __rte_always_inline uint8_t rte_graph_worker_model_no_check_get(struct rte_graph *graph)
uint16_t rte_graph_t
Definition: rte_graph.h:42
uint16_t(* rte_node_process_t)(struct rte_graph *graph, struct rte_node *node, void **objs, uint16_t nb_objs)
Definition: rte_graph.h:93
static void rte_node_enqueue_x1(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj)
#define RTE_GRAPH_NAMESIZE
Definition: rte_graph.h:30
static void ** rte_node_next_stream_get(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t nb_objs)
#define __rte_cache_aligned
Definition: rte_common.h:553
char name[RTE_MEMPOOL_NAMESIZE]
Definition: rte_mempool.h:231
static void rte_node_next_stream_move(struct rte_graph *graph, struct rte_node *src, rte_edge_t next)
#define unlikely(x)
static void rte_node_enqueue_x2(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1)
static void rte_node_next_stream_put(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, uint16_t idx)
#define RTE_GRAPH_FENCE
Definition: rte_graph.h:37
#define RTE_CACHE_LINE_MIN_SIZE
Definition: rte_common.h:550
static void rte_node_enqueue_next(struct rte_graph *graph, struct rte_node *node, rte_edge_t *nexts, void **objs, uint16_t nb_objs)
uint8_t rte_graph_worker_model_get(struct rte_graph *graph)
uint32_t rte_graph_off_t
Definition: rte_graph.h:39
static void * rte_memcpy(void *dst, const void *src, size_t n)
static void rte_node_enqueue_x4(struct rte_graph *graph, struct rte_node *node, rte_edge_t next, void *obj0, void *obj1, void *obj2, void *obj3)
static void rte_prefetch0(const volatile void *p)