Commit 54a0f913 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf top: Fix live annotation in the --stdio interface
  perf top tui: Don't recalc column widths considering just the first page
  perf report: Add progress bar when processing time ordered events
  perf hists browser: Warn about lost events
  perf tools: Fix a typo of command name as trace-cmd
  perf hists: Fix recalculation of total_period when sorting entries
  perf header: Fix build on old systems
  perf ui browser: Handle K_RESIZE in dialog windows
  perf ui browser: No need to switch char sets that often
  perf hists browser: Use K_TIMER
  perf ui: Rename ui__warning_paranoid to ui__error_paranoid
  perf ui: Reimplement the popup windows using libslang
  perf ui: Reimplement ui__popup_menu using ui__browser
  perf ui: Reimplement ui_helpline using libslang
  perf ui: Improve handling sigwinch a bit
  perf ui progress: Reimplement using slang
  perf evlist: Fix grouping of multiple events
parents 94956eed f9e3d4b1
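
The change that threads through most of the hunks below is the event-grouping fix ("perf evlist: Fix grouping of multiple events"): callers now pass the group leader's per-CPU fd table (struct xyarray *group_fd) explicitly into perf_evsel__open(), and evsel.c picks the leader fd for each CPU via the new GROUP_FD() macro before handing it to sys_perf_event_open(). The standalone sketch below only illustrates that per-CPU leader-fd selection; its types and names (the evsel stand-in, group_fd_for(), the example fd values) are simplified placeholders, not perf's real structures.

/* Illustrative sketch only -- simplified stand-ins for perf's xyarray/evsel. */
#include <stdio.h>

#define MAX_CPUS 4

struct evsel {
    int fd[MAX_CPUS];   /* one fd per CPU, like the xyarray in perf */
};

/*
 * Mirrors the pattern the patch wires through record/stat/top:
 * the group leader opens with group_fd == -1; every other event in the
 * group passes the leader's fd for the same CPU.
 */
static int group_fd_for(const struct evsel *evsel, const struct evsel *leader,
                        int cpu, int group)
{
    if (!group || evsel == leader)
        return -1;
    return leader->fd[cpu];
}

int main(void)
{
    struct evsel leader = { .fd = { 10, 11, 12, 13 } };
    struct evsel sibling = { .fd = { 0 } };
    int cpu;

    for (cpu = 0; cpu < MAX_CPUS; cpu++)
        printf("cpu%d: leader opens with %d, sibling opens with %d\n",
               cpu, group_fd_for(&leader, &leader, cpu, 1),
               group_fd_for(&sibling, &leader, cpu, 1));
    return 0;
}

Each tool touched below (record, stat, top, and the new perf_evlist__open helper) computes group_fd this same way before calling perf_evsel__open().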
@@ -262,13 +262,16 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
 static void open_counters(struct perf_evlist *evlist)
 {
-    struct perf_evsel *pos;
+    struct perf_evsel *pos, *first;
     if (evlist->cpus->map[0] < 0)
         no_inherit = true;
+    first = list_entry(evlist->entries.next, struct perf_evsel, node);
     list_for_each_entry(pos, &evlist->entries, node) {
         struct perf_event_attr *attr = &pos->attr;
+        struct xyarray *group_fd = NULL;
         /*
          * Check if parse_single_tracepoint_event has already asked for
          * PERF_SAMPLE_TIME.
@@ -283,15 +286,19 @@ static void open_counters(struct perf_evlist *evlist)
          */
         bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
+        if (group && pos != first)
+            group_fd = first->fd;
         config_attr(pos, evlist);
 retry_sample_id:
         attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-        if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
+        if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
+                             group_fd) < 0) {
             int err = errno;
             if (err == EPERM || err == EACCES) {
-                ui__warning_paranoid();
+                ui__error_paranoid();
                 exit(EXIT_FAILURE);
             } else if (err == ENODEV && cpu_list) {
                 die("No such device - did you specify"
......
@@ -278,9 +278,14 @@ struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
 struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
 struct stats walltime_nsecs_stats;
-static int create_perf_stat_counter(struct perf_evsel *evsel)
+static int create_perf_stat_counter(struct perf_evsel *evsel,
+                                    struct perf_evsel *first)
 {
     struct perf_event_attr *attr = &evsel->attr;
+    struct xyarray *group_fd = NULL;
+    if (group && evsel != first)
+        group_fd = first->fd;
     if (scale)
         attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -289,14 +294,15 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
     attr->inherit = !no_inherit;
     if (system_wide)
-        return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group);
+        return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+                                        group, group_fd);
     if (target_pid == -1 && target_tid == -1) {
         attr->disabled = 1;
         attr->enable_on_exec = 1;
     }
-    return perf_evsel__open_per_thread(evsel, evsel_list->threads, group);
+    return perf_evsel__open_per_thread(evsel, evsel_list->threads,
+                                       group, group_fd);
 }
 /*
@@ -396,7 +402,7 @@ static int read_counter(struct perf_evsel *counter)
 static int run_perf_stat(int argc __used, const char **argv)
 {
     unsigned long long t0, t1;
-    struct perf_evsel *counter;
+    struct perf_evsel *counter, *first;
     int status = 0;
     int child_ready_pipe[2], go_pipe[2];
     const bool forks = (argc > 0);
@@ -453,8 +459,10 @@ static int run_perf_stat(int argc __used, const char **argv)
         close(child_ready_pipe[0]);
     }
+    first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
     list_for_each_entry(counter, &evsel_list->entries, node) {
-        if (create_perf_stat_counter(counter) < 0) {
+        if (create_perf_stat_counter(counter, first) < 0) {
             if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
                 if (verbose)
                     ui__warning("%s event is not supported by the kernel.\n",
......
@@ -291,7 +291,7 @@ static int test__open_syscall_event(void)
         goto out_thread_map_delete;
     }
-    if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
+    if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
         pr_debug("failed to open counter: %s, "
                  "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                  strerror(errno));
@@ -366,7 +366,7 @@ static int test__open_syscall_event_on_all_cpus(void)
         goto out_thread_map_delete;
     }
-    if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
+    if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
         pr_debug("failed to open counter: %s, "
                  "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                  strerror(errno));
@@ -531,7 +531,7 @@ static int test__basic_mmap(void)
         perf_evlist__add(evlist, evsels[i]);
-        if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
+        if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
             pr_debug("failed to open counter: %s, "
                      "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                      strerror(errno));
......
@@ -89,6 +89,7 @@ static bool vmlinux_warned;
 static bool inherit = false;
 static int realtime_prio = 0;
 static bool group = false;
+static bool sample_id_all_avail = true;
 static unsigned int mmap_pages = 128;
 static bool dump_symtab = false;
@@ -199,7 +200,8 @@ static void record_precise_ip(struct hist_entry *he, int counter, u64 ip)
     struct symbol *sym;
     if (he == NULL || he->ms.sym == NULL ||
-        (he != top.sym_filter_entry && use_browser != 1))
+        ((top.sym_filter_entry == NULL ||
+          top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
         return;
     sym = he->ms.sym;
@@ -289,11 +291,13 @@ static void print_sym_table(void)
     printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
-    if (top.total_lost_warned != top.session->hists.stats.total_lost) {
-        top.total_lost_warned = top.session->hists.stats.total_lost;
-        color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
-        printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
-               top.total_lost_warned);
+    if (top.sym_evsel->hists.stats.nr_lost_warned !=
+        top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
+        top.sym_evsel->hists.stats.nr_lost_warned =
+            top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+        color_fprintf(stdout, PERF_COLOR_RED,
+                      "WARNING: LOST %d chunks, Check IO/CPU overload",
+                      top.sym_evsel->hists.stats.nr_lost_warned);
         ++printed;
     }
@@ -561,7 +565,6 @@ static void perf_top__sort_new_samples(void *arg)
     hists__decay_entries_threaded(&t->sym_evsel->hists,
                                   top.hide_user_symbols,
                                   top.hide_kernel_symbols);
-    hists__output_recalc_col_len(&t->sym_evsel->hists, winsize.ws_row - 3);
 }
 static void *display_thread_tui(void *arg __used)
@@ -671,6 +674,7 @@ static int symbol_filter(struct map *map __used, struct symbol *sym)
 }
 static void perf_event__process_sample(const union perf_event *event,
+                                       struct perf_evsel *evsel,
                                        struct perf_sample *sample,
                                        struct perf_session *session)
 {
@@ -770,12 +774,8 @@ static void perf_event__process_sample(const union perf_event *event,
     }
     if (al.sym == NULL || !al.sym->ignore) {
-        struct perf_evsel *evsel;
         struct hist_entry *he;
-        evsel = perf_evlist__id2evsel(top.evlist, sample->id);
-        assert(evsel != NULL);
         if ((sort__has_parent || symbol_conf.use_callchain) &&
             sample->callchain) {
             err = perf_session__resolve_callchain(session, al.thread,
@@ -807,6 +807,7 @@ static void perf_event__process_sample(const union perf_event *event,
 static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 {
     struct perf_sample sample;
+    struct perf_evsel *evsel;
     union perf_event *event;
     int ret;
@@ -817,10 +818,16 @@ static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
             continue;
         }
+        evsel = perf_evlist__id2evsel(self->evlist, sample.id);
+        assert(evsel != NULL);
         if (event->header.type == PERF_RECORD_SAMPLE)
-            perf_event__process_sample(event, &sample, self);
-        else
+            perf_event__process_sample(event, evsel, &sample, self);
+        else if (event->header.type < PERF_RECORD_MAX) {
+            hists__inc_nr_events(&evsel->hists, event->header.type);
             perf_event__process(event, &sample, self);
+        } else
+            ++self->hists.stats.nr_unknown_events;
     }
 }
@@ -834,10 +841,16 @@ static void perf_session__mmap_read(struct perf_session *self)
 static void start_counters(struct perf_evlist *evlist)
 {
-    struct perf_evsel *counter;
+    struct perf_evsel *counter, *first;
+    first = list_entry(evlist->entries.next, struct perf_evsel, node);
     list_for_each_entry(counter, &evlist->entries, node) {
         struct perf_event_attr *attr = &counter->attr;
+        struct xyarray *group_fd = NULL;
+        if (group && counter != first)
+            group_fd = first->fd;
         attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
@@ -858,14 +871,23 @@ static void start_counters(struct perf_evlist *evlist)
         attr->mmap = 1;
         attr->comm = 1;
         attr->inherit = inherit;
+retry_sample_id:
+        attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
         if (perf_evsel__open(counter, top.evlist->cpus,
-                             top.evlist->threads, group) < 0) {
+                             top.evlist->threads, group,
+                             group_fd) < 0) {
             int err = errno;
             if (err == EPERM || err == EACCES) {
-                ui__warning_paranoid();
+                ui__error_paranoid();
                 goto out_err;
+            } else if (err == EINVAL && sample_id_all_avail) {
+                /*
+                 * Old kernel, no attr->sample_id_type_all field
+                 */
+                sample_id_all_avail = false;
+                goto retry_sample_id;
             }
             /*
              * If it's cycles then fall back to hrtimer
......
@@ -310,9 +310,12 @@ fallback:
     }
     err = -ENOENT;
     dso->annotate_warned = 1;
-    pr_err("Can't annotate %s: No vmlinux file%s was found in the "
-           "path.\nPlease use 'perf buildid-cache -av vmlinux' or "
-           "--vmlinux vmlinux.\n",
+    pr_err("Can't annotate %s:\n\n"
+           "No vmlinux file%s\nwas found in the path.\n\n"
+           "Please use:\n\n"
+           "  perf buildid-cache -av vmlinux\n\n"
+           "or:\n\n"
+           "  --vmlinux vmlinux",
            sym->name, build_id_msg ?: "");
     goto out_free_filename;
 }
......
@@ -47,19 +47,20 @@ int dump_printf(const char *fmt, ...)
 }
 #ifdef NO_NEWT_SUPPORT
-void ui__warning(const char *format, ...)
+int ui__warning(const char *format, ...)
 {
     va_list args;
     va_start(args, format);
     vfprintf(stderr, format, args);
     va_end(args);
+    return 0;
 }
 #endif
-void ui__warning_paranoid(void)
+int ui__error_paranoid(void)
 {
-    ui__warning("Permission error - are you root?\n"
+    return ui__error("Permission error - are you root?\n"
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
                 " -1 - Not paranoid at all\n"
                 " 0 - Disallow raw tracepoint access for unpriv\n"
......
@@ -19,23 +19,18 @@ static inline int ui_helpline__show_help(const char *format __used, va_list ap _
     return 0;
 }
-static inline struct ui_progress *ui_progress__new(const char *title __used,
-                                                   u64 total __used)
-{
-    return (struct ui_progress *)1;
-}
-static inline void ui_progress__update(struct ui_progress *self __used,
-                                       u64 curr __used) {}
+static inline void ui_progress__update(u64 curr __used, u64 total __used,
+                                       const char *title __used) {}
-static inline void ui_progress__delete(struct ui_progress *self __used) {}
+#define ui__error(format, arg...) ui__warning(format, ##arg)
 #else
 extern char ui_helpline__last_msg[];
 int ui_helpline__show_help(const char *format, va_list ap);
 #include "ui/progress.h"
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
 #endif
-void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
-void ui__warning_paranoid(void);
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__error_paranoid(void);
 #endif /* __PERF_DEBUG_H */
@@ -539,3 +539,33 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
 {
     evlist->selected = evsel;
 }
+int perf_evlist__open(struct perf_evlist *evlist, bool group)
+{
+    struct perf_evsel *evsel, *first;
+    int err, ncpus, nthreads;
+    first = list_entry(evlist->entries.next, struct perf_evsel, node);
+    list_for_each_entry(evsel, &evlist->entries, node) {
+        struct xyarray *group_fd = NULL;
+        if (group && evsel != first)
+            group_fd = first->fd;
+        err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
+                               group, group_fd);
+        if (err < 0)
+            goto out_err;
+    }
+    return 0;
+out_err:
+    ncpus = evlist->cpus ? evlist->cpus->nr : 1;
+    nthreads = evlist->threads ? evlist->threads->nr : 1;
+    list_for_each_entry_reverse(evsel, &evlist->entries, node)
+        perf_evsel__close(evsel, ncpus, nthreads);
+    return err;
+}
@@ -50,6 +50,8 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
+int perf_evlist__open(struct perf_evlist *evlist, bool group);
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
......
@@ -16,6 +16,7 @@
 #include "thread_map.h"
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
 int __perf_evsel__sample_size(u64 sample_type)
 {
@@ -204,15 +205,16 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-                              struct thread_map *threads, bool group)
+                              struct thread_map *threads, bool group,
+                              struct xyarray *group_fds)
 {
     int cpu, thread;
     unsigned long flags = 0;
-    int pid = -1;
+    int pid = -1, err;
     if (evsel->fd == NULL &&
         perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
-        return -1;
+        return -ENOMEM;
     if (evsel->cgrp) {
         flags = PERF_FLAG_PID_CGROUP;
@@ -220,7 +222,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
     }
     for (cpu = 0; cpu < cpus->nr; cpu++) {
-        int group_fd = -1;
+        int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
         for (thread = 0; thread < threads->nr; thread++) {
@@ -231,8 +233,10 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                                  pid,
                                  cpus->map[cpu],
                                  group_fd, flags);
-            if (FD(evsel, cpu, thread) < 0)
+            if (FD(evsel, cpu, thread) < 0) {
+                err = -errno;
                 goto out_close;
+            }
             if (group && group_fd == -1)
                 group_fd = FD(evsel, cpu, thread);
@@ -249,7 +253,17 @@ out_close:
         }
         thread = threads->nr;
     } while (--cpu >= 0);
-    return -1;
+    return err;
+}
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+    if (evsel->fd == NULL)
+        return;