#include <linux/perf_event.h>

#include <rte_compat.h>
#define RTE_PMU_SUPPORTED
#if defined(RTE_ARCH_ARM64)
#include "rte_pmu_pmc_arm64.h"
#elif defined(RTE_ARCH_X86_64)
#include "rte_pmu_pmc_x86_64.h"
#else
#undef RTE_PMU_SUPPORTED
#endif
#define RTE_MAX_NUM_GROUP_EVENTS 8
#ifndef rte_pmu_pmc_read
#define rte_pmu_pmc_read(index) ({ RTE_SET_USED(index); 0; })
#endif
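The fallback above covers architectures without a user-space counter-read path. On x86_64 the included rte_pmu_pmc_x86_64.h is expected to supply the real definition; the following is only a sketch of what such a definition can look like, built on the RDPMC instruction (ECX selects the counter, the value comes back split across EDX:EAX). It is an illustration, not the shipped header.

/*
 * Illustrative sketch of an architecture-specific counter read for x86_64.
 * Not the shipped rte_pmu_pmc_x86_64.h; shown here only to make the
 * fallback stub above easier to place.
 */
static __rte_always_inline uint64_t
rte_pmu_pmc_read(unsigned int index)
{
        uint64_t low, high;

        /* RDPMC: counter number in ECX, result in EDX:EAX. */
        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (index));

        return low | (high << 32);
}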
#define __RTE_PMU_READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
        uint64_t width, offset;
        /* ... */
        seq = __RTE_PMU_READ_ONCE(pc->lock);
        /* ... */
        index = __RTE_PMU_READ_ONCE(pc->index);
        offset = __RTE_PMU_READ_ONCE(pc->offset);
        width = __RTE_PMU_READ_ONCE(pc->pmc_width);
        /* ... */
        if (likely(pc->cap_user_rdpmc && index)) {
                pmc = rte_pmu_pmc_read(index - 1);
        /* ... */
        if (likely(__RTE_PMU_READ_ONCE(pc->lock) == seq))
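Joined up, the fragments above are a seqlock-style read of the perf userpage: snapshot pc->lock, read index/offset/pmc_width, read the hardware counter when cap_user_rdpmc permits, fold the raw value into offset, and retry if pc->lock changed underneath. A sketch of the complete routine, assuming the elided lines only add the retry loop and the sign extension to pc->pmc_width bits:

/* Sketch of __rte_pmu_read_userpage(); a reconstruction, not the exact source. */
static __rte_always_inline uint64_t
__rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
{
        uint64_t width, offset;
        uint32_t seq, index;
        int64_t pmc;

        for (;;) {
                /* Snapshot the sequence counter before reading the fields. */
                seq = __RTE_PMU_READ_ONCE(pc->lock);
                rte_compiler_barrier();
                index = __RTE_PMU_READ_ONCE(pc->index);
                offset = __RTE_PMU_READ_ONCE(pc->offset);
                width = __RTE_PMU_READ_ONCE(pc->pmc_width);

                /* index == 0 means the counter cannot be read from user space. */
                if (likely(pc->cap_user_rdpmc && index)) {
                        pmc = rte_pmu_pmc_read(index - 1);
                        /* Sign-extend the raw value to the counter width. */
                        pmc <<= 64 - width;
                        pmc >>= 64 - width;
                        offset += pmc;
                }

                rte_compiler_barrier();
                /* Retry if the kernel updated the page while we were reading it. */
                if (likely(__RTE_PMU_READ_ONCE(pc->lock) == seq))
                        return offset;
        }
}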
#ifndef ALLOW_EXPERIMENTAL_API
#define __rte_pmu_enable_group(group) ({ RTE_SET_USED(group); 0; })
#define __rte_pmu_read_userpage(pc) ({ RTE_SET_USED(pc); 0; })
#endif
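Without ALLOW_EXPERIMENTAL_API the two helpers above collapse to constant zeroes, so rte_pmu_read() can never return real counter values. A minimal opt-in sketch, assuming the usual DPDK convention of enabling experimental API via CFLAGS (defining the macro before the include has the same effect on the header-level guards):

/*
 * Assumed application-side build setup: opt in to the DPDK experimental API,
 * typically with -DALLOW_EXPERIMENTAL_API in CFLAGS or, equivalently for
 * these header guards, a define before the include.
 */
#define ALLOW_EXPERIMENTAL_API
#include <rte_pmu.h>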
        if (unlikely(lcore_id >= RTE_MAX_LCORE))
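The bounds check above belongs to rte_pmu_read(), whose body is mostly elided here. A sketch of the surrounding fast path, assuming a global struct rte_pmu named rte_pmu that carries the fields listed at the end of this file, plus a hypothetical enabled flag in struct rte_pmu_event_group; errors are signalled by returning 0:

/*
 * Sketch of the rte_pmu_read() fast path. The global rte_pmu object and the
 * group->enabled flag are assumptions made for illustration.
 */
static __rte_always_inline uint64_t
rte_pmu_read(unsigned int index)
{
        unsigned int lcore_id = rte_lcore_id();
        struct rte_pmu_event_group *group;

        if (unlikely(lcore_id >= RTE_MAX_LCORE))
                return 0;

        if (unlikely(index >= rte_pmu.num_group_events))
                return 0;

        group = &rte_pmu.event_groups[lcore_id];
        if (unlikely(!group->enabled)) {
                /* Lazily set up the per-lcore event group on first use. */
                if (__rte_pmu_enable_group(group))
                        return 0;
        }

        return __rte_pmu_read_userpage(group->mmap_pages[index]);
}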
Macros:
    #define RTE_MAX_NUM_GROUP_EVENTS
    #define rte_compiler_barrier()
    #define __rte_cache_aligned
    #define __rte_always_inline

Functions:
    static unsigned rte_lcore_id(void)
    __rte_experimental int rte_pmu_init(void)
    __rte_experimental void rte_pmu_fini(void)
    __rte_experimental int rte_pmu_add_event(const char *name)
    __rte_experimental int __rte_pmu_enable_group(struct rte_pmu_event_group *group)
    static __rte_experimental __rte_always_inline uint64_t __rte_pmu_read_userpage(struct perf_event_mmap_page *pc)
    static __rte_experimental __rte_always_inline uint64_t rte_pmu_read(unsigned int index)

Data fields:
    struct perf_event_mmap_page *mmap_pages[RTE_MAX_NUM_GROUP_EVENTS]
    TAILQ_ENTRY(rte_pmu_event_group) next
    TAILQ_HEAD(, rte_pmu_event) event_list
    struct rte_pmu_event_group event_groups[RTE_MAX_LCORE]
    unsigned int num_group_events
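Taken together, the declarations above suggest the following self-monitoring pattern. A minimal usage sketch; the "cpu-cycles" event name and the convention that rte_pmu_add_event() returns the new event's index (negative on error) are assumptions, not taken from this listing:

#include <inttypes.h>
#include <stdio.h>

#include <rte_pmu.h>

/*
 * Example only: initialize self-monitoring, add one event, read it around
 * the measured code, then tear everything down.
 */
int
measure_cycles(void)
{
        int event;
        uint64_t before, after;

        if (rte_pmu_init() < 0)
                return -1;

        event = rte_pmu_add_event("cpu-cycles");
        if (event < 0) {
                rte_pmu_fini();
                return -1;
        }

        before = rte_pmu_read(event);
        /* ... code under measurement ... */
        after = rte_pmu_read(event);

        printf("cpu-cycles: %" PRIu64 "\n", after - before);

        rte_pmu_fini();
        return 0;
}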