123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160 |
- #ifndef __PERF_EVSEL_H
- #define __PERF_EVSEL_H 1
- #include <linux/list.h>
- #include <stdbool.h>
- #include "../../../include/linux/perf_event.h"
- #include "types.h"
- #include "xyarray.h"
- #include "cgroup.h"
- #include "hist.h"
-
/*
 * One raw counter readout: the counter value plus the time the event was
 * enabled and the time it was actually running (consumed by the read
 * helpers below when their 'scale' argument is true).  The anonymous
 * struct and values[] alias the same storage, so the triple can also be
 * accessed as a flat u64 array.
 */
struct perf_counts_values {
	union {
		struct {
			u64 val;	/* raw counter value */
			u64 ena;	/* time the event was enabled */
			u64 run;	/* time the event was actually running */
		};
		u64 values[3];		/* same three fields, array view */
	};
};
/*
 * Counts for one evsel: an aggregate over all CPUs plus one slot per CPU.
 * cpu[] is a flexible array member sized at allocation time, see
 * perf_evsel__alloc_counts().
 */
struct perf_counts {
	s8		          scaled;	/* whether aggr was scaled by ena/run; s8 so it can
						 * also carry a flag/error value - NOTE(review):
						 * confirm exact encoding against evsel.c */
	struct perf_counts_values aggr;		/* sum over all CPUs */
	struct perf_counts_values cpu[];	/* per-CPU readouts */
};
struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node 	node;	/* hash-list linkage for id -> evsel lookup */
	u64		 	id;	/* kernel-assigned sample id */
	struct perf_evsel	*evsel;	/* owning event selector */
};
/** struct perf_evsel - event selector
 *
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 */
struct perf_evsel {
	struct list_head	node;		/* linkage on the evlist */
	struct perf_event_attr	attr;		/* attributes handed to perf_event_open() */
	char			*filter;	/* event filter expression, if any */
	struct xyarray		*fd;		/* per-cpu x per-thread fds, see perf_evsel__alloc_fd() */
	struct xyarray		*sample_id;	/* per-cpu x per-thread sample ids, see perf_evsel__alloc_id() */
	u64			*id;		/* flat array of the event ids */
	struct perf_counts	*counts;	/* readout storage, see perf_evsel__alloc_counts() */
	int			idx;		/* position of this evsel in its evlist */
	int			ids;		/* number of entries used in id[] */
	struct hists		hists;		/* histogram accounting for this event */
	char			*name;		/* see kernel-doc above */
	union {					/* users pick one of these two */
		void		*priv;		/* tool-private data */
		off_t		id_offset;	/* file offset of the ids, for on-disk headers */
	};
	struct cgroup_sel	*cgrp;		/* cgroup to restrict counting to, or NULL */
};
struct cpu_map;
struct thread_map;
struct perf_evlist;

/* Constructor/destructor pair; _init/_exit are for embedded instances. */
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

/* Allocate/free the per-cpu x per-thread backing arrays used above. */
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

/*
 * Open the event on the given cpus and/or threads; 'group' requests that
 * the events be opened as a group.
 */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group);
/*
 * perf_evsel__match - true if @evsel is the generic event PERF_TYPE_<t> /
 * PERF_COUNT_<c>.  The @evsel argument is parenthesized so the macro stays
 * correct when callers pass a non-trivial expression (e.g. a cast or
 * conditional); t and c are pasted into constant names and need no parens.
 */
#define perf_evsel__match(evsel, t, c)			\
	((evsel)->attr.type == PERF_TYPE_##t &&		\
	 (evsel)->attr.config == PERF_COUNT_##c)
/* Backend for the two inline readers below; 'scale' selects ena/run scaling. */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);
- /**
- * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
- *
- * @evsel - event selector to read value
- * @cpu - CPU of interest
- * @thread - thread of interest
- */
- static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
- int cpu, int thread)
- {
- return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
- }
- /**
- * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
- *
- * @evsel - event selector to read value
- * @cpu - CPU of interest
- * @thread - thread of interest
- */
- static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
- int cpu, int thread)
- {
- return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
- }
/* Backend for the two inline aggregate readers below; 'scale' selects scaling. */
int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);
- /**
- * perf_evsel__read - Read the aggregate results on all CPUs
- *
- * @evsel - event selector to read value
- * @ncpus - Number of cpus affected, from zero
- * @nthreads - Number of threads affected, from zero
- */
- static inline int perf_evsel__read(struct perf_evsel *evsel,
- int ncpus, int nthreads)
- {
- return __perf_evsel__read(evsel, ncpus, nthreads, false);
- }
- /**
- * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
- *
- * @evsel - event selector to read value
- * @ncpus - Number of cpus affected, from zero
- * @nthreads - Number of threads affected, from zero
- */
- static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
- int ncpus, int nthreads)
- {
- return __perf_evsel__read(evsel, ncpus, nthreads, true);
- }
- int __perf_evsel__sample_size(u64 sample_type);
- static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
- {
- return __perf_evsel__sample_size(evsel->attr.sample_type);
- }
- #endif /* __PERF_EVSEL_H */
|