DPDK 25.11.0-rc3
rte_pmu.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2025 Marvell
 */

#ifndef RTE_PMU_H
#define RTE_PMU_H

#include <linux/perf_event.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_debug.h>
#include <rte_lcore.h>

#if defined(RTE_ARCH_ARM64)
#include "rte_pmu_pmc_arm64.h"
#elif defined(RTE_ARCH_X86_64)
#include "rte_pmu_pmc_x86_64.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/** Maximum number of events in a single group. */
#define RTE_MAX_NUM_GROUP_EVENTS 8

/** A structure describing a group of events. */
struct __rte_cache_aligned rte_pmu_event_group {
	/** array of user pages */
	struct perf_event_mmap_page *mmap_pages[RTE_MAX_NUM_GROUP_EVENTS];
	TAILQ_ENTRY(rte_pmu_event_group) next; /**< list entry */
	bool enabled; /**< true if group was enabled on a particular lcore */
};

/** A PMU state container. */
struct rte_pmu {
	struct rte_pmu_event_group event_groups[RTE_MAX_LCORE]; /**< per-lcore event groups */
	unsigned int num_group_events; /**< number of events in a group */
	unsigned int initialized; /**< non-zero once the library has been initialized */
	char *name; /**< name of the core PMU */
	TAILQ_HEAD(, rte_pmu_event) event_list; /**< list of matching events */
};

/** Global PMU state. */
extern struct rte_pmu rte_pmu;

/* Each architecture supporting PMU needs to provide its own version. */
#ifndef rte_pmu_pmc_read
#define rte_pmu_pmc_read(index) ({ RTE_SET_USED(index); 0; })
#endif

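/*
 * Illustrative sketch (not part of this header): an architecture header such
 * as rte_pmu_pmc_x86_64.h is expected to provide rte_pmu_pmc_read(), typically
 * a thin wrapper around the CPU instruction that reads a performance counter.
 * On x86_64 that could look roughly like the snippet below (assumed
 * implementation, shown only to illustrate the contract of the fallback
 * macro above):
 *
 *   static __rte_always_inline uint64_t
 *   rte_pmu_pmc_read(int index)
 *   {
 *       uint64_t low, high;
 *
 *       asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (index));
 *
 *       return low | (high << 32);
 *   }
 *   #define rte_pmu_pmc_read rte_pmu_pmc_read
 */
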
/**
 * Read a counter through the perf user page (struct perf_event_mmap_page)
 * using its seqlock: values are re-read until two consecutive reads of
 * pc->lock observe the same sequence number, i.e. the kernel did not
 * update the page in the middle of the read.
 */
__rte_experimental
static __rte_always_inline uint64_t
__rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
{
#define __RTE_PMU_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
	uint64_t width, offset;
	uint32_t seq, index;
	int64_t pmc;

	for (;;) {
		seq = __RTE_PMU_READ_ONCE(pc->lock);
		rte_compiler_barrier();
		index = __RTE_PMU_READ_ONCE(pc->index);
		offset = __RTE_PMU_READ_ONCE(pc->offset);
		width = __RTE_PMU_READ_ONCE(pc->pmc_width);

		/* index set to 0 means that particular counter cannot be used */
		if (likely(pc->cap_user_rdpmc && index)) {
			pmc = rte_pmu_pmc_read(index - 1);
			/* sign-extend the pmc_width-bit counter value */
			pmc <<= 64 - width;
			pmc >>= 64 - width;
			offset += pmc;
		}

		rte_compiler_barrier();

		if (likely(__RTE_PMU_READ_ONCE(pc->lock) == seq))
			return offset;
	}

	return 0;
}

/** @internal Enable the event group on the calling lcore. */
__rte_experimental
int
__rte_pmu_enable_group(struct rte_pmu_event_group *group);

/** Initialize the PMU library. Returns 0 on success, negative value otherwise. */
__rte_experimental
int
rte_pmu_init(void);

/** Finalize the PMU library and release its resources. */
__rte_experimental
void
rte_pmu_fini(void);

/**
 * Add an event to the group of enabled events.
 * @p name is an event name, e.g. as listed under /sys/bus/event_source/devices/.
 * Returns the event index to be passed to rte_pmu_read() on success,
 * negative value otherwise.
 */
__rte_experimental
int
rte_pmu_add_event(const char *name);

/**
 * Read the hardware counter configured to count occurrences of an event.
 * Must be called from an EAL thread; @p index is the value returned by
 * rte_pmu_add_event().
 */
__rte_experimental
static __rte_always_inline uint64_t
rte_pmu_read(unsigned int index)
{
#ifdef ALLOW_EXPERIMENTAL_API
	unsigned int lcore_id = rte_lcore_id();
	struct rte_pmu_event_group *group;

	if (unlikely(!rte_pmu.initialized))
		return 0;

	/* non-EAL threads are not supported */
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return 0;

	if (unlikely(index >= rte_pmu.num_group_events))
		return 0;

	group = &rte_pmu.event_groups[lcore_id];
	if (unlikely(!group->enabled)) {
		/* lazily enable the event group on first read from this lcore */
		if (__rte_pmu_enable_group(group))
			return 0;
	}

	return __rte_pmu_read_userpage(group->mmap_pages[index]);
#else
	RTE_SET_USED(index);
	RTE_VERIFY(false);
#endif
}

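/*
 * Usage sketch (illustrative only, not part of this header). It assumes the
 * application is built with ALLOW_EXPERIMENTAL_API, runs the measurement on
 * an EAL thread, and that the kernel exposes the requested event; the event
 * name "cpu_cycles" and the measured_work() function are placeholders.
 *
 *   #include <inttypes.h>
 *   #include <stdio.h>
 *
 *   #include <rte_pmu.h>
 *
 *   static void
 *   profile_iteration(void)
 *   {
 *       uint64_t before, after;
 *       int idx;
 *
 *       if (rte_pmu_init() < 0)
 *           return;
 *
 *       idx = rte_pmu_add_event("cpu_cycles");
 *       if (idx >= 0) {
 *           before = rte_pmu_read((unsigned int)idx);
 *           measured_work();
 *           after = rte_pmu_read((unsigned int)idx);
 *           printf("event count: %" PRIu64 "\n", after - before);
 *       }
 *
 *       rte_pmu_fini();
 *   }
 */
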
#ifdef __cplusplus
}
#endif

#endif /* RTE_PMU_H */