Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
 "Lots of activity:

   211 files changed, 8328 insertions(+), 4116 deletions(-)

  most of it on the tooling side.

  Main changes:

   * ftrace enhancements and fixes from Steve Rostedt.

   * uprobes fixes, cleanups and preparation for the ARM port from Oleg
     Nesterov.

   * UAPI fixes, from David Howells - prepares the arch/x86 UAPI
     transition

   * Separate perf tests into multiple objects, one per test, from Jiri
     Olsa.

   * Make hardware event translations available in sysfs, from Jiri
     Olsa.

   * Fixes to /proc/pid/maps parsing, preparatory to supporting data
     maps, from Namhyung Kim

   * Implement ui_progress for GTK, from Namhyung Kim

   * Add framework for automated perf_event_attr tests, where tools with
     different command line options will be run from a 'perf test', via
     python glue, and the perf syscall will be intercepted to verify
     that the perf_event_attr fields set by the tool are those expected,
     from Jiri Olsa

   * Add a 'link' method for hists, so that we can have the leader with
     buckets for all the entries in all the hists.  This new method is
     now used in the default 'diff' output, making the sum of the
     'baseline' column be 100%, eliminating blind spots.

   * libtraceevent fixes for compiler warnings hit while trying to make
     perf build on some distros, like Fedora 14 32-bit; some of the
     warnings pointed to real bugs.

   * Add a browser for 'perf script' and make it available from the
     report and annotate browsers.  It does filtering to find the
     scripts that handle events found in the perf.data file used.  From
     Feng Tang

   * perf inject changes to allow showing where a task sleeps, from
     Andrew Vagin.

   * Makefile improvements from Namhyung Kim.

   * Add --pre and --post command hooks in 'stat', from Peter Zijlstra.

   * Don't stop synthesizing threads when one vanishes; this is for the
     existing threads when we start a tool like trace.

   * Use sched:sched_stat_runtime to provide a thread summary, this
     produces the same output as the 'trace summary' subcommand of
     tglx's original "trace" tool.

   * Support interrupted syscalls in 'trace'

   * Add an event duration column and filter in 'trace'.

   * There are references to the man pages in some tools, so try to
     build Documentation when installing, warning the user if that is
     not possible, from Borislav Petkov.

   * Give user better message if precise is not supported, from David
     Ahern.

   * Try to find cross-built objdump path by using the session
     environment information in the perf.data file header, from Irina
     Tirdea, original patch and idea by Namhyung Kim.

   * Display more output on the features check for make V=1, so that one
     can figure out what is happening by looking at the gcc output, etc.
     From Jiri Olsa.

   * Add on_exit implementation for systems without one, e.g.  Android,
     from Bernhard Rosenkraenzer.

   * Only process events for vcpus of interest, which helps when handling
     a large number of events, from David Ahern.

   * Cross compilation fixes for Android, from Irina Tirdea.

   * Add documentation on compiling for Android, from Irina Tirdea.

   * perf diff improvements from Jiri Olsa.

   * Target (task/user/cpu/syswide) handling improvements, from Namhyung
     Kim.

   * Add support in 'trace' for tracing workload given by command line,
     from Namhyung Kim.

   * ... and much more."

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (194 commits)
  uprobes: Use percpu_rw_semaphore to fix register/unregister vs dup_mmap() race
  perf evsel: Introduce is_group_member method
  perf powerpc: Use uapi/unistd.h to fix build error
  tools: Pass the target in descend
  tools: Honour the O= flag when tool build called from a higher Makefile
  tools: Define a Makefile function to do subdir processing
  perf ui: Always compile browser setup code
  perf ui: Add ui_progress__finish()
  perf ui gtk: Implement ui_progress functions
  perf ui: Introduce generic ui_progress helper
  perf ui tui: Move progress.c under ui/tui directory
  perf tools: Add basic event modifier sanity check
  perf tools: Omit group members from perf_evlist__disable/enable
  perf tools: Ensure single disable call per event in record comand
  perf tools: Fix 'disabled' attribute config for record command
  perf tools: Fix attributes for '{}' defined event groups
  perf tools: Use sscanf for parsing /proc/pid/maps
  perf tools: Add gtk.<command> config option for launching GTK browser
  perf tools: Fix compile error on NO_NEWT=1 build
  perf hists: Initialize all of he->stat with zeroes
  ...
Linus Torvalds 2012-12-11 18:14:31 -08:00
commit 090f8ccba3
211 changed files with 8326 additions and 4114 deletions

View File

@ -2880,6 +2880,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
to facilitate early boot debugging.
See also Documentation/trace/events.txt
trace_options=[option-list]
[FTRACE] Enable or disable tracer options at boot.
The option-list is a comma delimited list of options
that can be enabled or disabled just as if you were
to echo the option name into
/sys/kernel/debug/tracing/trace_options
For example, to enable stacktrace option (to dump the
stack trace of each event), add to the command line:
trace_options=stacktrace
See also Documentation/trace/ftrace.txt "trace options"
section.
transparent_hugepage=
[KNL]
Format: [always|madvise|never]
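
For illustration, the same options can be toggled at run time by writing the
option name into the trace_options file, exactly as the trace_options= text
above describes. A minimal user-space sketch, assuming debugfs is mounted at
/sys/kernel/debug and ftrace is built in (the file path comes from the
documentation above; everything else is illustrative):

#include <stdio.h>

int main(void)
{
        /* run-time equivalent of booting with trace_options=stacktrace */
        FILE *f = fopen("/sys/kernel/debug/tracing/trace_options", "w");

        if (!f) {
                perror("fopen trace_options");
                return 1;
        }
        fputs("stacktrace\n", f);  /* "nostacktrace" would clear it again */
        fclose(f);
        return 0;
}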

View File

@ -11,3 +11,4 @@ header-y += reg.h
header-y += regdef.h
header-y += sysinfo.h
generic-y += exec.h
generic-y += trace_clock.h

View File

@ -31,5 +31,6 @@ generic-y += sockios.h
generic-y += termbits.h
generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += unaligned.h

View File

@ -43,6 +43,7 @@ generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += unaligned.h
generic-y += user.h

View File

@ -1,3 +1,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h

View File

@ -38,6 +38,7 @@ generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h

View File

@ -49,6 +49,7 @@ generic-y += termbits.h
generic-y += termios.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += user.h

View File

@ -11,3 +11,4 @@ header-y += sync_serial.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h

View File

@ -1,3 +1,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h

View File

@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h

View File

@ -48,6 +48,7 @@ generic-y += stat.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h

View File

@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
generic-y += trace_clock.h

View File

@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
generic-y += clkdev.h
generic-y += exec.h
generic-y += module.h
generic-y += trace_clock.h

View File

@ -24,6 +24,7 @@ generic-y += sections.h
generic-y += siginfo.h
generic-y += statfs.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h

View File

@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
header-y += elf.h
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h

View File

@ -1 +1,2 @@
# MIPS headers
generic-y += trace_clock.h

View File

@ -1,3 +1,4 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += trace_clock.h

View File

@ -60,6 +60,7 @@ generic-y += swab.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += user.h

View File

@ -3,3 +3,4 @@ generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
poll.h xor.h clkdev.h exec.h
generic-y += trace_clock.h

View File

@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h

View File

@ -158,10 +158,8 @@ static int do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_UPROBE) {
clear_thread_flag(TIF_UPROBE);
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
}
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);

View File

@ -64,6 +64,8 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
regs->nip = current->utask->xol_vaddr;
user_enable_single_step(current);
return 0;
}
@ -119,6 +121,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* to be executed.
*/
regs->nip = utask->vaddr + MAX_UINSN_BYTES;
user_disable_single_step(current);
return 0;
}
@ -162,6 +166,8 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
/*

View File

@ -1,3 +1,4 @@
generic-y += clkdev.h
generic-y += trace_clock.h

View File

@ -3,3 +3,4 @@ include include/asm-generic/Kbuild.asm
header-y +=
generic-y += clkdev.h
generic-y += trace_clock.h

View File

@ -31,5 +31,6 @@ generic-y += socket.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += ucontext.h
generic-y += xor.h

View File

@ -8,4 +8,5 @@ generic-y += local64.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h

View File

@ -34,5 +34,6 @@ generic-y += sockios.h
generic-y += statfs.h
generic-y += termbits.h
generic-y += termios.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += xor.h

View File

@ -2,3 +2,4 @@ generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
generic-y += switch_to.h clkdev.h
generic-y += trace_clock.h

View File

@ -53,6 +53,7 @@ generic-y += syscalls.h
generic-y += termbits.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += types.h
generic-y += ucontext.h
generic-y += unaligned.h

View File

@ -0,0 +1,20 @@
#ifndef _ASM_X86_TRACE_CLOCK_H
#define _ASM_X86_TRACE_CLOCK_H
#include <linux/compiler.h>
#include <linux/types.h>
#ifdef CONFIG_X86_TSC
extern u64 notrace trace_clock_x86_tsc(void);
# define ARCH_TRACE_CLOCKS \
{ trace_clock_x86_tsc, "x86-tsc", .in_ns = 0 },
#else /* !CONFIG_X86_TSC */
#define ARCH_TRACE_CLOCKS
#endif
#endif /* _ASM_X86_TRACE_CLOCK_H */

View File

@ -9,7 +9,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_pvclock.o = -pg
CFLAGS_REMOVE_kvmclock.o = -pg
@ -62,6 +61,7 @@ obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o

View File

@ -1316,6 +1316,121 @@ static struct attribute_group x86_pmu_format_group = {
.attrs = NULL,
};
struct perf_pmu_events_attr {
struct device_attribute attr;
u64 id;
};
/*
* Remove all undefined events (x86_pmu.event_map(id) == 0)
* out of events_attr attributes.
*/
static void __init filter_events(struct attribute **attrs)
{
int i, j;
for (i = 0; attrs[i]; i++) {
if (x86_pmu.event_map(i))
continue;
for (j = i; attrs[j]; j++)
attrs[j] = attrs[j + 1];
/* Check the shifted attr. */
i--;
}
}
static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr = \
container_of(attr, struct perf_pmu_events_attr, attr);
u64 config = x86_pmu.event_map(pmu_attr->id);
return x86_pmu.events_sysfs_show(page, config);
}
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
#define EVENT_ATTR(_name, _id) \
static struct perf_pmu_events_attr EVENT_VAR(_id) = { \
.attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \
.id = PERF_COUNT_HW_##_id, \
};
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
EVENT_ATTR(cache-references, CACHE_REFERENCES );
EVENT_ATTR(cache-misses, CACHE_MISSES );
EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
EVENT_ATTR(branch-misses, BRANCH_MISSES );
EVENT_ATTR(bus-cycles, BUS_CYCLES );
EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
static struct attribute *empty_attrs;
static struct attribute *events_attr[] = {
EVENT_PTR(CPU_CYCLES),
EVENT_PTR(INSTRUCTIONS),
EVENT_PTR(CACHE_REFERENCES),
EVENT_PTR(CACHE_MISSES),
EVENT_PTR(BRANCH_INSTRUCTIONS),
EVENT_PTR(BRANCH_MISSES),
EVENT_PTR(BUS_CYCLES),
EVENT_PTR(STALLED_CYCLES_FRONTEND),
EVENT_PTR(STALLED_CYCLES_BACKEND),
EVENT_PTR(REF_CPU_CYCLES),
NULL,
};
static struct attribute_group x86_pmu_events_group = {
.name = "events",
.attrs = events_attr,
};
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
ssize_t ret;
/*
* We have whole page size to spend and just little data
* to write, so we can safely use sprintf.
*/
ret = sprintf(page, "event=0x%02llx", event);
if (umask)
ret += sprintf(page + ret, ",umask=0x%02llx", umask);
if (edge)
ret += sprintf(page + ret, ",edge");
if (pc)
ret += sprintf(page + ret, ",pc");
if (any)
ret += sprintf(page + ret, ",any");
if (inv)
ret += sprintf(page + ret, ",inv");
if (cmask)
ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
ret += sprintf(page + ret, "\n");
return ret;
}
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@ -1362,6 +1477,11 @@ static int __init init_hw_perf_events(void)
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (!x86_pmu.events_sysfs_show)
x86_pmu_events_group.attrs = &empty_attrs;
else
filter_events(x86_pmu_events_group.attrs);
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
@ -1651,6 +1771,7 @@ static struct attribute_group x86_pmu_attr_group = {
static const struct attribute_group *x86_pmu_attr_groups[] = {
&x86_pmu_attr_group,
&x86_pmu_format_group,
&x86_pmu_events_group,
NULL,
};
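
The events attribute group added above is what backs the "Make hardware event
translations available in sysfs" item from the pull message: each generic
event gets a file whose contents come from x86_event_sysfs_show(). Below is a
minimal user-space consumer; the path follows from the core PMU registering
as "cpu" with a group named "events", and the string read back (for example
"event=0x3c" for cpu-cycles on Intel) depends on the CPU, so treat the
parsing as illustrative only:

#include <stdio.h>

int main(void)
{
        char buf[128];
        unsigned int event = 0;
        FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/cpu-cycles", "r");

        if (!f) {
                perror("fopen cpu-cycles");
                return 1;
        }
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return 1;
        }
        printf("raw translation: %s", buf);
        /* a real tool feeds this through its event parser; crude version: */
        if (sscanf(buf, "event=0x%x", &event) == 1)
                printf("event select: %#x\n", event);
        fclose(f);
        return 0;
}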

View File

@ -354,6 +354,8 @@ struct x86_pmu {
int attr_rdpmc;
struct attribute **format_attrs;
ssize_t (*events_sysfs_show)(char *page, u64 config);
/*
* CPU Hotplug hooks
*/
@ -536,6 +538,9 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
regs->ip = ip;
}
ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);

View File

@ -568,6 +568,14 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
}
}
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
(config & AMD64_EVENTSEL_EVENT) >> 24;
return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
@ -591,6 +599,7 @@ static __initconst const struct x86_pmu amd_pmu = {
.put_event_constraints = amd_put_event_constraints,
.format_attrs = amd_format_attr,
.events_sysfs_show = amd_event_sysfs_show,
.cpu_prepare = amd_pmu_cpu_prepare,
.cpu_starting = amd_pmu_cpu_starting,

View File

@ -1603,6 +1603,13 @@ static struct attribute *intel_arch_formats_attr[] = {
NULL,
};
ssize_t intel_event_sysfs_show(char *page, u64 config)
{
u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
return x86_event_sysfs_show(page, config, event);
}
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
@ -1628,6 +1635,7 @@ static __initconst const struct x86_pmu core_pmu = {
.event_constraints = intel_core_event_constraints,
.guest_get_msrs = core_guest_get_msrs,
.format_attrs = intel_arch_formats_attr,
.events_sysfs_show = intel_event_sysfs_show,
};
struct intel_shared_regs *allocate_shared_regs(int cpu)
@ -1766,6 +1774,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.pebs_aliases = intel_pebs_aliases_core2,
.format_attrs = intel_arch3_formats_attr,
.events_sysfs_show = intel_event_sysfs_show,
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,

View File

@ -227,6 +227,8 @@ static __initconst const struct x86_pmu p6_pmu = {
.event_constraints = p6_event_constraints,
.format_attrs = intel_p6_formats_attr,
.events_sysfs_show = intel_event_sysfs_show,
};
__init int p6_pmu_init(void)

View File

@ -195,12 +195,6 @@ void read_persistent_clock(struct timespec *ts)
ts->tv_nsec = 0;
}
unsigned long long native_read_tsc(void)
{
return __native_read_tsc();
}
EXPORT_SYMBOL(native_read_tsc);
static struct resource rtc_resources[] = {
[0] = {

View File

@ -0,0 +1,21 @@
/*
* X86 trace clocks
*/
#include <asm/trace_clock.h>
#include <asm/barrier.h>
#include <asm/msr.h>
/*
* trace_clock_x86_tsc(): A clock that is just the cycle counter.
*
* Unlike the other clocks, this is not in nanoseconds.
*/
u64 notrace trace_clock_x86_tsc(void)
{
u64 ret;
rdtsc_barrier();
rdtscll(ret);
return ret;
}
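
The x86-tsc clock above returns the raw cycle counter, which is why the
header earlier in this series marks it with .in_ns = 0: timestamps are in TSC
cycles, not nanoseconds. Selecting it at run time is just a matter of writing
x86-tsc to the trace_clock file. As a rough user-space analogue of the
rdtsc_barrier(); rdtscll() pair (the lfence barrier is an assumption made
here for the sketch, not taken from the patch):

#include <stdio.h>

/* fence loads, then read the 64-bit TSC from EDX:EAX */
static unsigned long long read_tsc(void)
{
        unsigned int lo, hi;

        __asm__ __volatile__("lfence; rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
        unsigned long long a = read_tsc();
        unsigned long long b = read_tsc();

        printf("tsc: %llu -> %llu (delta %llu cycles)\n", a, b, b - a);
        return 0;
}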

View File

@ -77,6 +77,12 @@ unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif
unsigned long long native_read_tsc(void)
{
return __native_read_tsc();
}
EXPORT_SYMBOL(native_read_tsc);
int check_tsc_unstable(void)
{
return tsc_unstable;

View File

@ -478,6 +478,11 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
regs->ip = current->utask->xol_vaddr;
pre_xol_rip_insn(auprobe, regs, autask);
autask->saved_tf = !!(regs->flags & X86_EFLAGS_TF);
regs->flags |= X86_EFLAGS_TF;
if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
set_task_blockstep(current, false);
return 0;
}
@ -603,6 +608,16 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (auprobe->fixups & UPROBE_FIX_CALL)
result = adjust_ret_addr(regs->sp, correction);
/*
* arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
* so we can get an extra SIGTRAP if we do not clear TF. We need
* to examine the opcode to make it right.
*/
if (utask->autask.saved_tf)
send_sig(SIGTRAP, current, 0);
else if (!(auprobe->fixups & UPROBE_FIX_SETF))
regs->flags &= ~X86_EFLAGS_TF;
return result;
}
@ -647,6 +662,10 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr;
handle_riprel_post_xol(auprobe, regs, NULL);
instruction_pointer_set(regs, utask->vaddr);
/* clear TF if it was set by us in arch_uprobe_pre_xol() */
if (!utask->autask.saved_tf)
regs->flags &= ~X86_EFLAGS_TF;
}
/*
@ -676,38 +695,3 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
send_sig(SIGTRAP, current, 0);
return ret;
}
void arch_uprobe_enable_step(struct arch_uprobe *auprobe)
{
struct task_struct *task = current;
struct arch_uprobe_task *autask = &task->utask->autask;
struct pt_regs *regs = task_pt_regs(task);
autask->saved_tf = !!(regs->flags & X86_EFLAGS_TF);
regs->flags |= X86_EFLAGS_TF;
if (test_tsk_thread_flag(task, TIF_BLOCKSTEP))
set_task_blockstep(task, false);
}
void arch_uprobe_disable_step(struct arch_uprobe *auprobe)
{
struct task_struct *task = current;
struct arch_uprobe_task *autask = &task->utask->autask;
bool trapped = (task->utask->state == UTASK_SSTEP_TRAPPED);
struct pt_regs *regs = task_pt_regs(task);
/*
* The state of TIF_BLOCKSTEP was not saved so we can get an extra
* SIGTRAP if we do not clear TF. We need to examine the opcode to
* make it right.
*/
if (unlikely(trapped)) {
if (!autask->saved_tf)
regs->flags &= ~X86_EFLAGS_TF;
} else {
if (autask->saved_tf)
send_sig(SIGTRAP, task, 0);
else if (!(auprobe->fixups & UPROBE_FIX_SETF))
regs->flags &= ~X86_EFLAGS_TF;
}
}

View File

@ -25,4 +25,5 @@ generic-y += siginfo.h
generic-y += statfs.h
generic-y += termios.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h

View File

@ -0,0 +1,16 @@
#ifndef _ASM_GENERIC_TRACE_CLOCK_H
#define _ASM_GENERIC_TRACE_CLOCK_H
/*
* Arch-specific trace clocks.
*/
/*
* Additional trace clocks added to the trace_clocks
* array in kernel/trace/trace.c
* None if the architecture has not defined it.
*/
#ifndef ARCH_TRACE_CLOCKS
# define ARCH_TRACE_CLOCKS
#endif
#endif /* _ASM_GENERIC_TRACE_CLOCK_H */

View File

@ -86,6 +86,12 @@ struct trace_iterator {
cpumask_var_t started;
};
enum trace_iter_flags {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
TRACE_FILE_TIME_IN_NS = 4,
};
struct trace_event;
@ -127,13 +133,13 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);

View File

@ -527,9 +527,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...);
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
@ -539,8 +536,8 @@ static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
static inline int
trace_printk(const char *fmt, ...)
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
return 0;
}

View File

@ -159,13 +159,14 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,

View File

@ -12,6 +12,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/trace_clock.h>
extern u64 notrace trace_clock_local(void);
extern u64 notrace trace_clock(void);
extern u64 notrace trace_clock_global(void);

View File

@ -97,12 +97,12 @@ extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_con
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern void __weak arch_uprobe_enable_step(struct arch_uprobe *arch);
extern void __weak arch_uprobe_disable_step(struct arch_uprobe *arch);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@ -129,6 +129,12 @@ static inline void
uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
}
static inline void uprobe_start_dup_mmap(void)
{
}
static inline void uprobe_end_dup_mmap(void)
{
}
static inline void
uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{

View File

@ -545,8 +545,7 @@ ftrace_raw_event_##call(void *__data, proto) \
{ assign; } \
\
if (!filter_current_check_discard(buffer, event_call, entry, event)) \
trace_nowake_buffer_unlock_commit(buffer, \
event, irq_flags, pc); \
trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
@ -620,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Define the insertion callback to perf events
*
* The job is very similar to ftrace_raw_event_<call> except that we don't
* insert in the ring buffer but in a perf counter.
*
* static void ftrace_perf_<call>(proto)
* {
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ftrace_event_call *event_call = &event_<call>;
* extern void perf_tp_event(int, u64, u64, void *, int);
* struct ftrace_raw_##call *entry;
* struct perf_trace_buf *trace_buf;
* u64 __addr = 0, __count = 1;
* unsigned long irq_flags;
* struct trace_entry *ent;
* int __entry_size;
* int __data_size;
* int __cpu
* int pc;
*
* pc = preempt_count();
*
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
* // Below we want to get the aligned size by taking into account
* // the u32 field that will later store the buffer size
* __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
* sizeof(u64));
* __entry_size -= sizeof(u32);
*
* // Protect the non nmi buffer
* // This also protects the rcu read side
* local_irq_save(irq_flags);
* __cpu = smp_processor_id();
*
* if (in_nmi())
* trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
* else
* trace_buf = rcu_dereference_sched(perf_trace_buf);
*
* if (!trace_buf)
* goto end;
*
* trace_buf = per_cpu_ptr(trace_buf, __cpu);
*
* // Avoid recursion from perf that could mess up the buffer
* if (trace_buf->recursion++)
* goto end_recursion;
*
* raw_data = trace_buf->buf;
*
* // Make recursion update visible before entering perf_tp_event
* // so that we protect from perf recursions.
*
* barrier();
*
* //zero dead bytes from alignment to avoid stack leak to userspace:
* *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
* entry = (struct ftrace_raw_<call> *)raw_data;
* ent = &entry->ent;
* tracing_generic_entry_update(ent, irq_flags, pc);
* ent->type = event_call->id;
*
* <tstruct> <- do some jobs with dynamic arrays
*
* <assign> <- affect our values
*
* perf_tp_event(event_call->id, __addr, __count, entry,
* __entry_size); <- submit them to perf counter
*
* }
*/
#ifdef CONFIG_PERF_EVENTS

View File

@ -31,27 +31,4 @@ struct syscall_metadata {
struct ftrace_event_call *exit_event;
};
#ifdef CONFIG_FTRACE_SYSCALLS
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
extern int reg_event_syscall_exit(struct ftrace_event_call *call);
extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
extern int
ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
struct trace_event *event);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
struct trace_event *event);
#endif
#ifdef CONFIG_PERF_EVENTS
int perf_sysenter_enable(struct ftrace_event_call *call);
void perf_sysenter_disable(struct ftrace_event_call *call);
int perf_sysexit_enable(struct ftrace_event_call *call);
void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
#endif /* _TRACE_SYSCALL_H */

View File

@ -33,6 +33,7 @@
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/uprobes.h>
@ -71,6 +72,8 @@ static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
static struct percpu_rw_semaphore dup_mmap_sem;
/*
* uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
* events active at this time. Probably a fine grained per inode count is
@ -766,10 +769,13 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
struct map_info *info;
int err = 0;
percpu_down_write(&dup_mmap_sem);
info = build_map_info(uprobe->inode->i_mapping,
uprobe->offset, is_register);
if (IS_ERR(info))
return PTR_ERR(info);
if (IS_ERR(info)) {
err = PTR_ERR(info);
goto out;
}
while (info) {
struct mm_struct *mm = info->mm;
@ -799,7 +805,8 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
mmput(mm);
info = free_map_info(info);
}
out:
percpu_up_write(&dup_mmap_sem);
return err;
}
@ -1131,6 +1138,16 @@ void uprobe_clear_state(struct mm_struct *mm)
kfree(area);
}
void uprobe_start_dup_mmap(void)
{
percpu_down_read(&dup_mmap_sem);
}
void uprobe_end_dup_mmap(void)
{
percpu_up_read(&dup_mmap_sem);
}
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
newmm->uprobes_state.xol_area = NULL;
@ -1199,6 +1216,11 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
vaddr = kmap_atomic(area->page);
memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
kunmap_atomic(vaddr);
/*
* We probably need flush_icache_user_range() but it needs vma.
* This should work on supported architectures too.
*/
flush_dcache_page(area->page);
return current->utask->xol_vaddr;
}
@ -1430,16 +1452,6 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
return uprobe;
}
void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
{
user_enable_single_step(current);
}
void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
{
user_disable_single_step(current);
}
/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@ -1493,7 +1505,6 @@ static void handle_swbp(struct pt_regs *regs)
goto out;
if (!pre_ssout(uprobe, regs, bp_vaddr)) {
arch_uprobe_enable_step(&uprobe->arch);
utask->active_uprobe = uprobe;
utask->state = UTASK_SSTEP;
return;
@ -1525,7 +1536,6 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
else
WARN_ON_ONCE(1);
arch_uprobe_disable_step(&uprobe->arch);
put_uprobe(uprobe);
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
@ -1604,6 +1614,9 @@ static int __init init_uprobes(void)
mutex_init(&uprobes_mmap_mutex[i]);
}
if (percpu_init_rwsem(&dup_mmap_sem))
return -ENOMEM;
return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);
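
The register/unregister vs dup_mmap() race fix above is built on
percpu_rw_semaphore: dup_mmap() takes the cheap per-CPU read side around the
whole copy, while register_for_each_vma() takes the write side and therefore
waits for any fork in flight to finish before walking the vmas. A
stripped-down sketch of that locking pattern follows; the names are
illustrative, not the actual uprobes code:

#include <linux/module.h>
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore demo_sem;

/* hot path, many concurrent callers: per-CPU accounting, no shared lock */
static void __maybe_unused demo_fast_path(void)
{
        percpu_down_read(&demo_sem);
        /* ... work that must not overlap the slow path ... */
        percpu_up_read(&demo_sem);
}

/* rare path: blocks new readers and waits for existing ones to drain */
static void __maybe_unused demo_slow_path(void)
{
        percpu_down_write(&demo_sem);
        /* ... exclusive section ... */
        percpu_up_write(&demo_sem);
}

static int __init demo_init(void)
{
        /* allocates the per-CPU counters and may fail, as in init_uprobes() */
        return percpu_init_rwsem(&demo_sem);
}

static void __exit demo_exit(void)
{
        percpu_free_rwsem(&demo_sem);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");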

View File

@ -352,6 +352,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
unsigned long charge;
struct mempolicy *pol;
uprobe_start_dup_mmap();
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
uprobe_dup_mmap(oldmm, mm);
@ -469,6 +470,7 @@ out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
uprobe_end_dup_mmap();
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);

View File

@ -119,6 +119,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
select IRQ_WORK
config GENERIC_TRACER
bool

View File

@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
{
return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
ftrace_enabled = 1;
return 0;
}
device_initcall(ftrace_nodyn_init);
core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (strlen(tmp) == 0)
return 1;
ret = strict_strtol(tmp, 10, &val);
ret = kstrtol(tmp, 10, &val);
if (ret < 0)
return ret;

View File

@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
local_t commit_overrun;
local_t overrun;
local_t entries;
local_t overrun;
local_t commit_overrun;
local_t dropped_events;
local_t committing;
local_t commits;
unsigned long read;
@ -1820,7 +1821,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
}
/**
* ring_buffer_update_event - update event type and data
* rb_update_event - update event type and data
* @event: the even to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
@ -2155,8 +2156,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* If we are not in overwrite mode,
* this is easy, just stop here.
*/
if (!(buffer->flags & RB_FL_OVERWRITE))
if (!(buffer->flags & RB_FL_OVERWRITE)) {
local_inc(&cpu_buffer->dropped_events);
goto out_reset;
}
ret = rb_handle_head_page(cpu_buffer,
tail_page,
@ -2720,8 +2723,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length,
void *data)
unsigned long length,
void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@ -2929,12 +2932,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
unsigned long ret;
u64 ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
@ -2995,7 +2998,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
* ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
* ring_buffer_overrun_cpu - get the number of overruns caused by the ring
* buffer wrapping around (only if RB_FL_OVERWRITE is on).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@ -3015,7 +3019,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
* ring_buffer_commit_overrun_cpu - get the number of overruns caused by
* commits failing due to the buffer wrapping around while there are uncommitted
* events, such as during an interrupt storm.
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@ -3035,6 +3041,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
/**
* ring_buffer_dropped_events_cpu - get the number of dropped events caused by
* the ring buffer filling up (only if RB_FL_OVERWRITE is off).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
unsigned long
ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->dropped_events);
return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
@ -3864,9 +3892,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
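
The new dropped_events counter above records events rejected because the
buffer was full while overwrite was off, and ring_buffer_dropped_events_cpu()
exposes it; the trace.c changes further down print it as a "dropped events:"
line in each per-CPU stats file. A small reader of that file, assuming
debugfs is mounted at /sys/kernel/debug and cpu0 exists (path per the
existing per_cpu stats layout):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", "r");

        if (!f) {
                perror("fopen per_cpu/cpu0/stats");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "dropped events"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}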

View File

@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
@ -77,6 +78,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
return 0;
}
/*
* To prevent the comm cache from being overwritten when no
* tracing is active, only save the comm when a trace event
* occurred.
*/
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
* When a reader is waiting for data, then this variable is
* set to true.
*/
static bool trace_wakeup_needed;
static struct irq_work trace_work_wakeup;
/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;
static int __init set_trace_boot_options(char *str)
{
strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
trace_boot_options = trace_boot_options_buf;
return 0;
}
__setup("trace_options=", set_trace_boot_options);
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
@ -198,20 +226,9 @@ static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
/**
* tracing_is_enabled - return tracer_enabled status
*
* This function is used by other tracers to know the status
* of the tracer_enabled flag. Tracers may use this function
* to know if it should enable their features when starting
* up. See irqsoff tracer for an example (start_irqsoff_tracer).
*/
int tracing_is_enabled(void)
{
return tracer_enabled;
return tracing_is_on();
}
/*
@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
static void wakeup_work_handler(struct work_struct *work)
/**
* trace_wake_up - wake up tasks waiting for trace input
*
* Schedules a delayed work to wake up any task that is blocked on the
* trace_wait queue. These is used with trace_poll for tasks polling the
* trace.
*/
static void trace_wake_up(struct irq_work *work)
{
wake_up(&trace_wait);
}
wake_up_all(&trace_wait);
static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
}
/**
* tracing_on - enable tracing buffers
@ -393,22 +416,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
/**
* trace_wake_up - wake up tasks waiting for trace input
*
* Schedules a delayed work to wake up any task that is blocked on the
* trace_wait queue. These is used with trace_poll for tasks polling the
* trace.
*/
void trace_wake_up(void)
{
const unsigned long delay = msecs_to_jiffies(2);
if (trace_flags & TRACE_ITER_BLOCK)
return;
schedule_delayed_work(&wakeup_work, delay);
}
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
if (!str)
return 0;
ret = strict_strtoul(str, 0, &threshold);
ret = kstrtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
@ -477,10 +484,12 @@ static const char *trace_options[] = {
static struct {
u64 (*func)(void);
const char *name;
int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
{ trace_clock_local, "local" },
{ trace_clock_global, "global" },
{ trace_clock_counter, "counter" },
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
ARCH_TRACE_CLOCKS
};
int trace_clock_id;
@ -757,6 +766,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
static void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);
prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
/*
* The events can happen in critical sections where
* checking a work queue can cause deadlocks.
* After adding a task to the queue, this flag is set
* only to notify events to try to wake up the queue
* using irq_work.
*
* We don't clear it even if the buffer is no longer
* empty. The flag only causes the next event to run
* irq_work to do the work queue wake up. The worse
* that can happen if we race with !trace_empty() is that
* an event will cause an irq_work to try to wake up
* an empty queue.
*
* There's no reason to protect this flag either, as
* the work queue and irq_work logic will do the necessary
* synchronization for the wake ups. The only thing
* that is necessary is that the wake up happens after
* a task has been queued. It's OK for spurious wake ups.
*/
trace_wakeup_needed = true;
if (trace_empty(iter))
schedule();
finish_wait(&trace_wait, &wait);
}
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
@ -875,32 +918,6 @@ int register_tracer(struct tracer *type)
return ret;
}
void unregister_tracer(struct tracer *type)
{
struct tracer **t;
mutex_lock(&trace_types_lock);
for (t = &trace_types; *t; t = &(*t)->next) {
if (*t == type)
goto found;
}
pr_info("Tracer %s not registered\n", type->name);
goto out;
found:
*t = (*t)->next;
if (type == current_trace && tracer_enabled) {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(&global_trace);
current_trace = &nop_trace;
}
out:
mutex_unlock(&trace_types_lock);
}
void tracing_reset(struct trace_array *tr, int cpu)
{
struct ring_buffer *buffer = tr->buffer;
@ -1131,10 +1148,14 @@ void trace_find_cmdline(int pid, char comm[])
void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
!tracing_is_on())
if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
return;
if (!__this_cpu_read(trace_cmdline_save))
return;
__this_cpu_write(trace_cmdline_save, false);
trace_save_cmdline(tsk);
}
@ -1178,27 +1199,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
return event;
}
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
__this_cpu_write(trace_cmdline_save, true);
if (trace_wakeup_needed) {
trace_wakeup_needed = false;
/* irq_work_queue() supplies it's own memory barriers */
irq_work_queue(&trace_work_wakeup);
}
ring_buffer_unlock_commit(buffer, event);
}
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
int wake)
unsigned long flags, int pc)
{
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
if (wake)
trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@ -1215,29 +1245,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
@ -1269,7 +1291,7 @@ trace_function(struct trace_array *tr,
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
}
void
@ -1362,7 +1384,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
@ -1458,7 +1480,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
@ -1559,10 +1581,10 @@ static int alloc_percpu_trace_buffer(void)
return -ENOMEM;
}
static int buffers_allocated;
void trace_printk_init_buffers(void)
{
static int buffers_allocated;
if (buffers_allocated)
return;
@ -1571,7 +1593,38 @@ void trace_printk_init_buffers(void)
pr_info("ftrace: Allocated trace_printk buffers\n");
/* Expand the buffers to set size */
tracing_update_buffers();
buffers_allocated = 1;
/*
* trace_printk_init_buffers() can be called by modules.
* If that happens, then we need to start cmdline recording
* directly here. If the global_trace.buffer is already
* allocated here, then this was called by module code.
*/
if (global_trace.buffer)
tracing_start_cmdline_record();
}
void trace_printk_start_comm(void)
{
/* Start tracing comms if trace printk is set */
if (!buffers_allocated)
return;
tracing_start_cmdline_record();
}
static void trace_printk_start_stop_comm(int enabled)
{
if (!buffers_allocated)
return;
if (enabled)
tracing_start_cmdline_record();
else
tracing_stop_cmdline_record();
}
/**
@ -1622,7 +1675,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
@ -1693,7 +1746,7 @@ int trace_array_vprintk(struct trace_array *tr,
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
@ -2426,6 +2479,10 @@ __tracing_open(struct inode *inode, struct file *file)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
if (trace_clocks[trace_clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
/* stop the trace while dumping */
tracing_stop();
@ -2794,26 +2851,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
if (mask == TRACE_ITER_OVERWRITE)
ring_buffer_change_overwrite(global_trace.buffer, enabled);
if (mask == TRACE_ITER_PRINTK)
trace_printk_start_stop_comm(enabled);
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
static int trace_set_options(char *option)
{
char buf[64];
char *cmp;
int neg = 0;
int ret;
int ret = 0;
int i;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
cmp = strstrip(option);
if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
@ -2832,10 +2882,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
return ret;
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
trace_set_options(buf);
*ppos += cnt;
return cnt;
@ -2939,56 +3004,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
.llseek = generic_file_llseek,
};
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
int r;
r = sprintf(buf, "%u\n", tracer_enabled);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
val = !!val;
mutex_lock(&trace_types_lock);
if (tracer_enabled ^ val) {
/* Only need to warn if this is used to change the state */
WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
if (val) {
tracer_enabled = 1;
if (current_trace->start)
current_trace->start(tr);
tracing_start();
} else {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(tr);
}
}
mutex_unlock(&trace_types_lock);
*ppos += cnt;
return cnt;
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@ -3030,6 +3045,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
*/
ring_buffer_expanded = 1;
/* May be called before buffers are initialized */
if (!global_trace.buffer)
return 0;
ret = ring_buffer_resize(global_trace.buffer, size, cpu);
if (ret < 0)
return ret;
@ -3325,6 +3344,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
if (trace_clocks[trace_clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
@ -3385,19 +3408,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);
prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
if (trace_empty(iter))
schedule();
finish_wait(&trace_wait, &wait);
}
/*
* This is a make-shift waitqueue.
* A tracer might use this callback on some rare cases:
@ -3438,7 +3448,7 @@ static int tracing_wait_pipe(struct file *filp)
return -EINTR;
/*
* We block until we read something and tracing is disabled.
* We block until we read something and tracing is enabled.
* We still block if tracing is disabled, but we have never
* read anything. This allows a user to cat this file, and
* then enable tracing. But after we have read something,
@ -3446,7 +3456,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
if (!tracer_enabled && iter->pos)
if (tracing_is_enabled() && iter->pos)
break;
}
@ -3955,7 +3965,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
entry->buf[cnt] = '\0';
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
written = cnt;
@ -4016,6 +4026,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
/*
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
tracing_reset_online_cpus(&global_trace);
if (max_tr.buffer)
tracing_reset_online_cpus(&max_tr);
mutex_unlock(&trace_types_lock);
*fpos += cnt;
@ -4037,13 +4055,6 @@ static const struct file_operations tracing_max_lat_fops = {
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_ctrl_fops = {
.open = tracing_open_generic,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
.llseek = generic_file_llseek,
};
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
@ -4377,13 +4388,27 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
if (trace_clocks[trace_clock_id].in_ns) {
/* local or global for trace_clock */
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
t, usec_rem);
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
} else {
/* counter or tsc mode for trace_clock */
trace_seq_printf(s, "oldest event ts: %llu\n",
ring_buffer_oldest_event_ts(tr->buffer, cpu));
trace_seq_printf(s, "now ts: %llu\n",
ring_buffer_time_stamp(tr->buffer, cpu));
}
cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
trace_seq_printf(s, "dropped events: %ld\n", cnt);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
@ -4815,9 +4840,6 @@ static __init int tracer_init_debugfs(void)
d_tracer = tracing_init_dentry();
trace_create_file("tracing_enabled", 0644, d_tracer,
&global_trace, &tracing_ctrl_fops);
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
@ -5089,6 +5111,7 @@ __init static int tracer_alloc_buffers(void)
/* Only allocate trace_printk buffers if a trace_printk exists */
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
/* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
@ -5136,6 +5159,7 @@ __init static int tracer_alloc_buffers(void)
#endif
trace_init_cmdlines();
init_irq_work(&trace_work_wakeup, trace_wake_up);
register_tracer(&nop_trace);
current_trace = &nop_trace;
@ -5147,6 +5171,13 @@ __init static int tracer_alloc_buffers(void)
register_die_notifier(&trace_die_notifier);
while (trace_boot_options) {
char *option;
option = strsep(&trace_boot_options, ",");
trace_set_options(option);
}
return 0;
out_free_cpumask:

View File

@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
int print_max;
int use_max_tr;
bool print_max;
bool use_max_tr;
};
@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long len,
unsigned long flags,
int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
void __buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
@ -407,12 +405,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
};
extern cpumask_var_t __read_mostly tracing_buffer_mask;
@ -841,6 +834,7 @@ extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \

View File

@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->correct = val == expect;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
}
return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);
core_initcall(init_branch_tracer);
#else
static inline

View File

@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events();
seq_ops = inode->i_private;
return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return r;
}
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
.next = t_next,
@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
};
static const struct file_operations ftrace_avail_fops = {
.open = ftrace_event_seq_open,
.open = ftrace_event_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations ftrace_set_event_fops = {
.open = ftrace_event_seq_open,
.open = ftrace_event_set_open,
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_event_seq_ops;
return seq_open(file, seq_ops);
}
static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_set_event_seq_ops;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
ftrace_clear_events();
return seq_open(file, seq_ops);
}
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
if (ret)
pr_warn("Failed to enable trace event: %s\n", token);
}
trace_printk_start_comm();
return 0;
}
@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
(void *)&show_event_seq_ops,
&ftrace_avail_fops);
NULL, &ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");
entry = debugfs_create_file("set_event", 0644, d_tracer,
(void *)&show_set_event_seq_ops,
&ftrace_set_event_fops);
NULL, &ftrace_set_event_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_event' entry\n");
@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
trace_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));

View File

@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
}
} else {
if (field->is_signed)
ret = strict_strtoll(pred->regex.pattern, 0, &val);
ret = kstrtoll(pred->regex.pattern, 0, &val);
else
ret = strict_strtoull(pred->regex.pattern, 0, &val);
ret = kstrtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;

View File

@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
* We use the callback data field (which is a pointer)
* as our counter.
*/
ret = strict_strtoul(number, 0, (unsigned long *)&count);
ret = kstrtoul(number, 0, (unsigned long *)&count);
if (ret)
return ret;
@ -411,5 +411,4 @@ static __init int init_function_trace(void)
init_func_cmd_traceon();
return register_tracer(&function_trace);
}
device_initcall(init_function_trace);
core_initcall(init_function_trace);

View File

@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
return 1;
}
@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
ring_buffer_unlock_commit(buffer, event);
__buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
return register_tracer(&graph_trace);
}
device_initcall(init_graph_trace);
core_initcall(init_graph_trace);

View File

@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
device_initcall(init_irqsoff_tracer);
core_initcall(init_irqsoff_tracer);

View File

@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
/* an address specified */
ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
if (ret) {
pr_info("Failed to parse address.\n");
return ret;
@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
trace_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
}
/* Kretprobe handler */
@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
trace_nowake_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
trace_buffer_unlock_commit_regs(buffer, event,
irq_flags, pc, regs);
}
/* Event entry printers */

View File

@ -610,24 +610,54 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
return trace_print_lat_fmt(s, entry);
}
static unsigned long preempt_mark_thresh = 100;
static unsigned long preempt_mark_thresh_us = 100;
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
unsigned long rel_usecs)
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
rel_usecs > preempt_mark_thresh ? '!' :
rel_usecs > 1 ? '+' : ' ');
unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
unsigned long long abs_ts = iter->ts - iter->tr->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq;
if (in_ns) {
abs_ts = ns2usecs(abs_ts);
rel_ts = ns2usecs(rel_ts);
}
if (verbose && in_ns) {
unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
unsigned long abs_msec = (unsigned long)abs_ts;
unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
unsigned long rel_msec = (unsigned long)rel_ts;
return trace_seq_printf(
s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
ns2usecs(iter->ts),
abs_msec, abs_usec,
rel_msec, rel_usec);
} else if (verbose && !in_ns) {
return trace_seq_printf(
s, "[%016llx] %lld (+%lld): ",
iter->ts, abs_ts, rel_ts);
} else if (!verbose && in_ns) {
return trace_seq_printf(
s, " %4lldus%c: ",
abs_ts,
rel_ts > preempt_mark_thresh_us ? '!' :
rel_ts > 1 ? '+' : ' ');
} else { /* !verbose && !in_ns */
return trace_seq_printf(s, " %4lld: ", abs_ts);
}
}
int trace_print_context(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned long secs = (unsigned long)t;
unsigned long long t;
unsigned long secs, usec_rem;
char comm[TASK_COMM_LEN];
int ret;
@ -644,8 +674,13 @@ int trace_print_context(struct trace_iterator *iter)
return 0;
}
return trace_seq_printf(s, " %5lu.%06lu: ",
secs, usec_rem);
if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
t = ns2usecs(iter->ts);
usec_rem = do_div(t, USEC_PER_SEC);
secs = (unsigned long)t;
return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
} else
return trace_seq_printf(s, " %12llu: ", iter->ts);
}
int trace_print_lat_context(struct trace_iterator *iter)
@ -659,36 +694,29 @@ int trace_print_lat_context(struct trace_iterator *iter)
*next_entry = trace_find_next_entry(iter, NULL,
&next_ts);
unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
unsigned long rel_usecs;
/* Restore the original ent_size */
iter->ent_size = ent_size;
if (!next_entry)
next_ts = iter->ts;
rel_usecs = ns2usecs(next_ts - iter->ts);
if (verbose) {
char comm[TASK_COMM_LEN];
trace_find_cmdline(entry->pid, comm);
ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
" %ld.%03ldms (+%ld.%03ldms): ", comm,
entry->pid, iter->cpu, entry->flags,
entry->preempt_count, iter->idx,
ns2usecs(iter->ts),
abs_usecs / USEC_PER_MSEC,
abs_usecs % USEC_PER_MSEC,
rel_usecs / USEC_PER_MSEC,
rel_usecs % USEC_PER_MSEC);
ret = trace_seq_printf(
s, "%16s %5d %3d %d %08x %08lx ",
comm, entry->pid, iter->cpu, entry->flags,
entry->preempt_count, iter->idx);
} else {
ret = lat_print_generic(s, entry, iter->cpu);
if (ret)
ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
}
if (ret)
ret = lat_print_timestamp(iter, next_ts);
return ret;
}

View File

@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
goto fail;
type++;
if (strict_strtoul(type, 0, &bs))
if (kstrtoul(type, 0, &bs))
goto fail;
switch (bs) {
@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
if (tmp) {
/* skip sign because strict_strtol doesn't accept '+' */
ret = strict_strtoul(tmp + 1, 0, offset);
/* skip sign because kstrtoul doesn't accept '+' */
ret = kstrtoul(tmp + 1, 0, offset);
if (ret)
return ret;
@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
else
ret = -EINVAL;
} else if (isdigit(arg[5])) {
ret = strict_strtoul(arg + 5, 10, &param);
ret = kstrtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
ret = strict_strtoul(arg + 1, 0, &param);
ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
break;
case '+': /* deref memory */
arg++; /* Skip '+', because strict_strtol() rejects it. */
arg++; /* Skip '+', because kstrtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
ret = strict_strtol(arg, 0, &offset);
ret = kstrtol(arg, 0, &offset);
if (ret)
break;

View File

@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(tr->buffer, flags, 6, pc);
ftrace_trace_userstack(tr->buffer, flags, pc);
trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void

View File

@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.print_max = 1,
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
.print_max = 1,
.print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
.use_max_tr = 1,
.use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
return 0;
}
device_initcall(init_wakeup_tracer);
core_initcall(init_wakeup_tracer);

View File

@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
int (*func)(void))
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
unsigned long count;
char *func_name;
int ret;
@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* passed in by parameter to fool gcc from optimizing */
func();
@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
@ -452,7 +449,6 @@ static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
@ -569,7 +563,6 @@ static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
@ -662,7 +653,6 @@ int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
unsigned long count;
int ret;
@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* start the tracing */
ftrace_enabled = 1;
tracer_enabled = 1;
ret = tracer_init(trace, tr);
if (ret) {
@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
if (ret)
@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);

View File

@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
return &entry->enter_fields;
}
struct trace_event_functions enter_syscall_print_funcs = {
.trace = print_syscall_enter,
};
struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
struct ftrace_event_class event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
.get_fields = syscall_get_enter_fields,
.raw_init = init_syscall_trace,
};
struct ftrace_event_class event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
.fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
.raw_init = init_syscall_trace,
};
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
int init_syscall_trace(struct ftrace_event_call *call)
static int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
struct trace_event_functions enter_syscall_print_funcs = {
.trace = print_syscall_enter,
};
struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
struct ftrace_event_class event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
.get_fields = syscall_get_enter_fields,
.raw_init = init_syscall_trace,
};
struct ftrace_event_class event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
.fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
.raw_init = init_syscall_trace,
};
unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
static int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
void perf_sysenter_disable(struct ftrace_event_call *call)
static void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
static int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
void perf_sysexit_disable(struct ftrace_event_call *call)
static void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;

View File

@ -189,7 +189,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (argv[0][0] == '-')
is_delete = true;
else if (argv[0][0] != 'p') {
pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n");
pr_info("Probe definition must be started with 'p' or '-'.\n");
return -EINVAL;
}
@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
ret = strict_strtoul(arg, 0, &offset);
ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;

View File

@ -253,7 +253,7 @@ all_deps := $(all_objs:%.o=.%.d)
# let .d file also depends on the source and header files
define check_deps
@set -e; $(RM) $@; \
$(CC) -M $(CFLAGS) $< > $@.$$$$; \
$(CC) -MM $(CFLAGS) $< > $@.$$$$; \
sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
$(RM) $@.$$$$
endef

View File

@ -174,7 +174,7 @@ static int cmdline_init(struct pevent *pevent)
return 0;
}
static char *find_cmdline(struct pevent *pevent, int pid)
static const char *find_cmdline(struct pevent *pevent, int pid)
{
const struct cmdline *comm;
struct cmdline key;
@ -2637,7 +2637,7 @@ process_func_handler(struct event_format *event, struct pevent_function_handler
struct print_arg *farg;
enum event_type type;
char *token;
char *test;
const char *test;
int i;
arg->type = PRINT_FUNC;
@ -3889,7 +3889,7 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
struct event_format *event, struct print_arg *arg)
{
unsigned char *buf;
char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
if (arg->type == PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
@ -3931,7 +3931,8 @@ static int is_printable_array(char *p, unsigned int len)
return 1;
}
static void print_event_fields(struct trace_seq *s, void *data, int size,
static void print_event_fields(struct trace_seq *s, void *data,
int size __maybe_unused,
struct event_format *event)
{
struct format_field *field;
@ -4408,7 +4409,7 @@ void pevent_event_info(struct trace_seq *s, struct event_format *event,
void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
struct pevent_record *record)
{
static char *spaces = " "; /* 20 spaces */
static const char *spaces = " "; /* 20 spaces */
struct event_format *event;
unsigned long secs;
unsigned long usecs;
@ -5070,8 +5071,8 @@ static const char * const pevent_error_str[] = {
};
#undef _PE
int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
char *buf, size_t buflen)
int pevent_strerror(struct pevent *pevent __maybe_unused,
enum pevent_errno errnum, char *buf, size_t buflen)
{
int idx;
const char *msg;
@ -5100,6 +5101,7 @@ int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
case PEVENT_ERRNO__READ_FORMAT_FAILED:
case PEVENT_ERRNO__READ_PRINT_FAILED:
case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED:
case PEVENT_ERRNO__INVALID_ARG_TYPE:
snprintf(buf, buflen, "%s", msg);
break;
@ -5362,7 +5364,7 @@ int pevent_register_print_function(struct pevent *pevent,
if (type == PEVENT_FUNC_ARG_VOID)
break;
if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) {
if (type >= PEVENT_FUNC_ARG_MAX_TYPES) {
do_warning("Invalid argument type %d", type);
ret = PEVENT_ERRNO__INVALID_ARG_TYPE;
goto out_free;
@ -5560,7 +5562,7 @@ void pevent_free(struct pevent *pevent)
}
if (pevent->func_map) {
for (i = 0; i < pevent->func_count; i++) {
for (i = 0; i < (int)pevent->func_count; i++) {
free(pevent->func_map[i].func);
free(pevent->func_map[i].mod);
}
@ -5582,7 +5584,7 @@ void pevent_free(struct pevent *pevent)
}
if (pevent->printk_map) {
for (i = 0; i < pevent->printk_count; i++)
for (i = 0; i < (int)pevent->printk_count; i++)
free(pevent->printk_map[i].printk);
free(pevent->printk_map);
}

View File

@ -1,3 +1,5 @@
include ../config/utilities.mak
OUTPUT := ./
ifeq ("$(origin O)", "command line")
ifneq ($(O),)
@ -64,6 +66,7 @@ MAKEINFO=makeinfo
INSTALL_INFO=install-info
DOCBOOK2X_TEXI=docbook2x-texi
DBLATEX=dblatex
XMLTO=xmlto
ifndef PERL_PATH
PERL_PATH = /usr/bin/perl
endif
@ -71,6 +74,16 @@ endif
-include ../config.mak.autogen
-include ../config.mak
_tmp_tool_path := $(call get-executable,$(ASCIIDOC))
ifeq ($(_tmp_tool_path),)
missing_tools = $(ASCIIDOC)
endif
_tmp_tool_path := $(call get-executable,$(XMLTO))
ifeq ($(_tmp_tool_path),)
missing_tools += $(XMLTO)
endif
#
# For asciidoc ...
# -7.1.2, no extra settings are needed.
@ -170,7 +183,12 @@ pdf: $(OUTPUT)user-manual.pdf
install: install-man
install-man: man
check-man-tools:
ifdef missing_tools
$(error "You need to install $(missing_tools) for man pages")
endif
do-install-man: man
$(INSTALL) -d -m 755 $(DESTDIR)$(man1dir)
# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir)
# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
@ -178,6 +196,15 @@ install-man: man
# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir)
# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
install-man: check-man-tools man
try-install-man:
ifdef missing_tools
$(warning Please install $(missing_tools) to have the man pages installed)
else
$(MAKE) do-install-man
endif
install-info: info
$(INSTALL) -d -m 755 $(DESTDIR)$(infodir)
$(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir)
@ -246,7 +273,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
$(QUIET_XMLTO)$(RM) $@ && \
xmlto -o $(OUTPUT) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
$(XMLTO) -o $(OUTPUT) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
$(OUTPUT)%.xml : %.txt
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \

View File

@ -0,0 +1,78 @@
How to compile perf for Android
=========================================
I. Set the Android NDK environment
------------------------------------------------
(a). Use the Android NDK
------------------------------------------------
1. You need to download and install the Android Native Development Kit (NDK).
Set the NDK variable to point to the path where you installed the NDK:
export NDK=/path/to/android-ndk
2. Set cross-compiling environment variables for NDK toolchain and sysroot.
For arm:
export NDK_TOOLCHAIN=${NDK}/toolchains/arm-linux-androideabi-4.6/prebuilt/linux-x86/bin/arm-linux-androideabi-
export NDK_SYSROOT=${NDK}/platforms/android-9/arch-arm
For x86:
export NDK_TOOLCHAIN=${NDK}/toolchains/x86-4.6/prebuilt/linux-x86/bin/i686-linux-android-
export NDK_SYSROOT=${NDK}/platforms/android-9/arch-x86
This method does not work for Android NDK versions up to Revision 8b.
perf uses some bionic enhancements that are not included in these NDK versions.
You can use method (b) described below instead.
(b). Use the Android source tree
-----------------------------------------------
1. Download the master branch of the Android source tree.
Set the environment for the target you want using:
source build/envsetup.sh
lunch
2. Build your own NDK sysroot to contain latest bionic changes and set the
NDK sysroot environment variable.
cd ${ANDROID_BUILD_TOP}/ndk
For arm:
./build/tools/build-ndk-sysroot.sh --abi=arm
export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-arm
For x86:
./build/tools/build-ndk-sysroot.sh --abi=x86
export NDK_SYSROOT=${ANDROID_BUILD_TOP}/ndk/build/platforms/android-3/arch-x86
3. Set the NDK toolchain environment variable.
For arm:
export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/arm-linux-androideabi-
For x86:
export NDK_TOOLCHAIN=${ANDROID_TOOLCHAIN}/i686-linux-android-
II. Compile perf for Android
------------------------------------------------
You need to run make with the NDK toolchain and sysroot defined above:
For arm:
make ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}"
For x86:
make ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}"
III. Install perf
-----------------------------------------------
You need to connect to your Android device/emulator using adb.
Install perf using:
adb push perf /data/perf
If you also want to use perf-archive, you need the busybox tools for Android.
To install perf-archive, you first need to replace #!/bin/bash with #!/system/bin/sh:
sed 's/#!\/bin\/bash/#!\/system\/bin\/sh/g' perf-archive >> /tmp/perf-archive
chmod +x /tmp/perf-archive
adb push /tmp/perf-archive /data/perf-archive
IV. Environment settings for running perf
------------------------------------------------
Some perf features need environment variables to run properly.
You need to set these before running perf on the target:
adb shell
# PERF_PAGER=cat
V. Run perf
------------------------------------------------
Run perf on your device/emulator to which you previously connected using adb:
# ./data/perf

View File

@ -72,6 +72,66 @@ OPTIONS
--symfs=<directory>::
Look for files with symbols relative to this directory.
-b::
--baseline-only::
Show only items with a match in the baseline.
-c::
--compute::
Differential computation selection - delta,ratio,wdiff (default is delta).
If '+' is specified as the first character, the output is sorted based
on the computation results.
See COMPARISON METHODS section for more info.
-p::
--period::
Show period values for both compared hist entries.
-F::
--formula::
Show formula for given computation.
COMPARISON METHODS
------------------
delta
~~~~~
If specified, the 'Delta' column is displayed with value 'd' computed as:
d = A->period_percent - B->period_percent
with:
- A/B being matching hist entry from first/second file specified
(or perf.data/perf.data.old) respectively.
- period_percent being the % of the hist entry period value within
single data file
ratio
~~~~~
If specified, the 'Ratio' column is displayed with value 'r' computed as:
r = A->period / B->period
with:
- A/B being matching hist entry from first/second file specified
(or perf.data/perf.data.old) respectively.
- period being the hist entry period value
wdiff
~~~~~
If specified, the 'Weighted diff' column is displayed with value 'd' computed as:
d = B->period * WEIGHT-A - A->period * WEIGHT-B
- A/B being matching hist entry from first/second file specified
(or perf.data/perf.data.old) respectively.
- period being the hist entry period value
- WEIGHT-A/WEIGHT-B being user-supplied weights in the '-c' option
behind the ':' separator, like '-c wdiff:1,2'.
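As a quick illustration of the three computations above, here is a small
stand-alone C sketch (all values and variable names are hypothetical and not
taken from the perf sources):

  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical data: A = first (new) file, B = second (baseline) file */
          double a_percent = 30.0, b_percent = 25.0;  /* period_percent values  */
          long long a_period = 3000, b_period = 1500; /* raw hist entry periods */
          long long weight_a = 1, weight_b = 2;       /* as in '-c wdiff:1,2'   */

          /* delta: difference of the period percentages */
          printf("delta: %+.2f%%\n", a_percent - b_percent);

          /* ratio: quotient of the raw period values */
          printf("ratio: %.2f\n", (double)a_period / (double)b_period);

          /* wdiff: d = B->period * WEIGHT-A - A->period * WEIGHT-B */
          printf("wdiff: %lld\n", b_period * weight_a - a_period * weight_b);

          return 0;
  }

With these numbers the sketch prints delta: +5.00%, ratio: 2.00 and
wdiff: -4500.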
SEE ALSO
--------
linkperf:perf-record[1]

View File

@ -29,6 +29,17 @@ OPTIONS
-v::
--verbose::
Be more verbose.
-i::
--input=::
Input file name. (default: stdin)
-o::
--output=::
Output file name. (default: stdout)
-s::
--sched-stat::
Merge sched_stat and sched_switch events to show where and for how long
tasks slept. sched_switch contains a callchain of where a task slept and
sched_stat contains the timeslice of how long a task slept.
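A minimal (illustrative) invocation using only the options listed above,
with hypothetical file names:

  perf inject --sched-stat -i perf.data -o perf.data.merged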
SEE ALSO
--------

View File

@ -108,6 +108,11 @@ with it. --append may be used here. Examples:
3>results perf stat --log-fd 3 -- $cmd
3>>results perf stat --log-fd 3 --append -- $cmd
--pre::
--post::
Pre and post measurement hooks, e.g.:
perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
EXAMPLES

View File

@ -48,6 +48,12 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), Events are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
--duration:
Show only events that had a duration greater than N.M ms.
--sched:
Accrue thread runtime and provide a summary at the end of the session.
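An illustrative invocation combining both options with a command-line
workload (argument values are arbitrary):

  perf trace --duration 1.0 --sched -- sleep 1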
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script[1]

View File

@ -155,15 +155,15 @@ SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
-include config/feature-tests.mak
ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
CFLAGS := $(CFLAGS) -fstack-protector-all
endif
ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y)
ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wstack-protector,-Wstack-protector),y)
CFLAGS := $(CFLAGS) -Wstack-protector
endif
ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y)
ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-Wvolatile-register-var),y)
CFLAGS := $(CFLAGS) -Wvolatile-register-var
endif
@ -197,8 +197,16 @@ BASIC_CFLAGS = \
-I. \
-I$(TRACE_EVENT_DIR) \
-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS =
ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
BIONIC := 1
EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
BASIC_CFLAGS += -I.
endif
# Guard against environment variables
BUILTIN_OBJS =
LIB_H =
@ -330,6 +338,7 @@ LIB_H += util/evlist.h
LIB_H += util/exec_cmd.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
LIB_H += util/machine.h
LIB_H += util/map.h
LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
@ -346,6 +355,7 @@ LIB_H += util/svghelper.h
LIB_H += util/tool.h
LIB_H += util/run-command.h
LIB_H += util/sigchain.h
LIB_H += util/dso.h
LIB_H += util/symbol.h
LIB_H += util/color.h
LIB_H += util/values.h
@ -389,7 +399,6 @@ LIB_OBJS += $(OUTPUT)util/help.o
LIB_OBJS += $(OUTPUT)util/levenshtein.o
LIB_OBJS += $(OUTPUT)util/parse-options.o
LIB_OBJS += $(OUTPUT)util/parse-events.o
LIB_OBJS += $(OUTPUT)util/parse-events-test.o
LIB_OBJS += $(OUTPUT)util/path.o
LIB_OBJS += $(OUTPUT)util/rbtree.o
LIB_OBJS += $(OUTPUT)util/bitmap.o
@ -404,15 +413,16 @@ LIB_OBJS += $(OUTPUT)util/top.o
LIB_OBJS += $(OUTPUT)util/usage.o
LIB_OBJS += $(OUTPUT)util/wrapper.o
LIB_OBJS += $(OUTPUT)util/sigchain.o
LIB_OBJS += $(OUTPUT)util/dso.o
LIB_OBJS += $(OUTPUT)util/symbol.o
LIB_OBJS += $(OUTPUT)util/symbol-elf.o
LIB_OBJS += $(OUTPUT)util/dso-test-data.o
LIB_OBJS += $(OUTPUT)util/color.o
LIB_OBJS += $(OUTPUT)util/pager.o
LIB_OBJS += $(OUTPUT)util/header.o
LIB_OBJS += $(OUTPUT)util/callchain.o
LIB_OBJS += $(OUTPUT)util/values.o
LIB_OBJS += $(OUTPUT)util/debug.o
LIB_OBJS += $(OUTPUT)util/machine.o
LIB_OBJS += $(OUTPUT)util/map.o
LIB_OBJS += $(OUTPUT)util/pstack.o
LIB_OBJS += $(OUTPUT)util/session.o
@ -440,10 +450,29 @@ LIB_OBJS += $(OUTPUT)util/intlist.o
LIB_OBJS += $(OUTPUT)util/vdso.o
LIB_OBJS += $(OUTPUT)util/stat.o
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/helpline.o
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/hist.o
LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
LIB_OBJS += $(OUTPUT)arch/common.o
LIB_OBJS += $(OUTPUT)tests/parse-events.o
LIB_OBJS += $(OUTPUT)tests/dso-data.o
LIB_OBJS += $(OUTPUT)tests/attr.o
LIB_OBJS += $(OUTPUT)tests/vmlinux-kallsyms.o
LIB_OBJS += $(OUTPUT)tests/open-syscall.o
LIB_OBJS += $(OUTPUT)tests/open-syscall-all-cpus.o
LIB_OBJS += $(OUTPUT)tests/open-syscall-tp-fields.o
LIB_OBJS += $(OUTPUT)tests/mmap-basic.o
LIB_OBJS += $(OUTPUT)tests/perf-record.o
LIB_OBJS += $(OUTPUT)tests/rdpmc.o
LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
LIB_OBJS += $(OUTPUT)tests/pmu.o
LIB_OBJS += $(OUTPUT)tests/util.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
# Benchmark modules
@ -473,8 +502,8 @@ BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
BUILTIN_OBJS += $(OUTPUT)builtin-test.o
BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
BUILTIN_OBJS += $(OUTPUT)tests/builtin-test.o
PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT)
@ -495,18 +524,33 @@ ifdef NO_LIBELF
NO_LIBUNWIND := 1
else
FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y)
ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF),libelf),y)
FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS)
ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
else
ifeq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC),glibc),y)
LIBC_SUPPORT := 1
endif
ifeq ($(BIONIC),1)
LIBC_SUPPORT := 1
endif
ifeq ($(LIBC_SUPPORT),1)
msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
NO_LIBELF := 1
NO_DWARF := 1
NO_DEMANGLE := 1
else
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
endif
else
FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
# for linking with debug library, run like:
# make DEBUG=1 LIBDW_DIR=/opt/libdw/
ifdef LIBDW_DIR
LIBDW_CFLAGS := -I$(LIBDW_DIR)/include
LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
endif
FLAGS_DWARF=$(ALL_CFLAGS) $(LIBDW_CFLAGS) -ldw -lelf $(LIBDW_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF),libdw),y)
msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
NO_DWARF := 1
endif # Dwarf support
@ -522,7 +566,7 @@ ifdef LIBUNWIND_DIR
endif
FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y)
ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
NO_LIBUNWIND := 1
endif # Libunwind support
@ -551,7 +595,8 @@ LIB_OBJS += $(OUTPUT)util/symbol-minimal.o
else # NO_LIBELF
BASIC_CFLAGS += -DLIBELF_SUPPORT
ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y)
FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
BASIC_CFLAGS += -DLIBELF_MMAP
endif
@ -559,7 +604,8 @@ ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
else
BASIC_CFLAGS += -DDWARF_SUPPORT
BASIC_CFLAGS := -DDWARF_SUPPORT $(LIBDW_CFLAGS) $(BASIC_CFLAGS)
BASIC_LDFLAGS := $(LIBDW_LDFLAGS) $(BASIC_LDFLAGS)
EXTLIBS += -lelf -ldw
LIB_OBJS += $(OUTPUT)util/probe-finder.o
LIB_OBJS += $(OUTPUT)util/dwarf-aux.o
@ -577,7 +623,7 @@ endif
ifndef NO_LIBAUDIT
FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit
ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y)
ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT),libaudit),y)
msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
else
BASIC_CFLAGS += -DLIBAUDIT_SUPPORT
@ -588,23 +634,23 @@ endif
ifndef NO_NEWT
FLAGS_NEWT=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lnewt
ifneq ($(call try-cc,$(SOURCE_NEWT),$(FLAGS_NEWT)),y)
ifneq ($(call try-cc,$(SOURCE_NEWT),$(FLAGS_NEWT),libnewt),y)
msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
else
# Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
BASIC_CFLAGS += -I/usr/include/slang
BASIC_CFLAGS += -DNEWT_SUPPORT
EXTLIBS += -lnewt -lslang
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/browser.o
LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o
LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
LIB_OBJS += $(OUTPUT)ui/browsers/map.o
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
LIB_OBJS += $(OUTPUT)ui/tui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
LIB_OBJS += $(OUTPUT)ui/tui/progress.o
LIB_H += ui/browser.h
LIB_H += ui/browsers/map.h
LIB_H += ui/keysyms.h
@ -617,10 +663,10 @@ endif
ifndef NO_GTK2
FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2),gtk2),y)
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
else
ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2),-DHAVE_GTK_INFO_BAR),y)
BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
endif
BASIC_CFLAGS += -DGTK2_SUPPORT
@ -630,9 +676,9 @@ ifndef NO_GTK2
LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
LIB_OBJS += $(OUTPUT)ui/gtk/util.o
LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
# Make sure that it'd be included only once.
ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/util.o
endif
endif
@ -647,7 +693,7 @@ else
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED),perl),y)
BASIC_CFLAGS += -DNO_LIBPERL
else
ALL_LDFLAGS += $(PERL_EMBED_LDFLAGS)
@ -701,11 +747,11 @@ else
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED),python),y)
$(call disable-python,Python.h (for Python 2.x))
else
ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y)
ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED),python version),y)
$(warning Python 3 is not yet supported; please set)
$(warning PYTHON and/or PYTHON_CONFIG appropriately.)
$(warning If you also have Python 2 installed, then)
@ -739,22 +785,22 @@ else
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
else
FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD),libbfd)
ifeq ($(has_bfd),y)
EXTLIBS += -lbfd
else
FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty
has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY))
has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY),liberty)
ifeq ($(has_bfd_iberty),y)
EXTLIBS += -lbfd -liberty
else
FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz
has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z))
has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z),libz)
ifeq ($(has_bfd_iberty_z),y)
EXTLIBS += -lbfd -liberty -lz
else
FLAGS_CPLUS_DEMANGLE=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -liberty
has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE))
has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE),demangle)
ifeq ($(has_cplus_demangle),y)
EXTLIBS += -liberty
BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
@ -776,13 +822,19 @@ ifeq ($(NO_PERF_REGS),0)
endif
ifndef NO_STRLCPY
ifeq ($(call try-cc,$(SOURCE_STRLCPY),),y)
ifeq ($(call try-cc,$(SOURCE_STRLCPY),,-DHAVE_STRLCPY),y)
BASIC_CFLAGS += -DHAVE_STRLCPY
endif
endif
ifndef NO_ON_EXIT
ifeq ($(call try-cc,$(SOURCE_ON_EXIT),,-DHAVE_ON_EXIT),y)
BASIC_CFLAGS += -DHAVE_ON_EXIT
endif
endif
ifndef NO_BACKTRACE
ifeq ($(call try-cc,$(SOURCE_BACKTRACE),),y)
ifeq ($(call try-cc,$(SOURCE_BACKTRACE),,-DBACKTRACE_SUPPORT),y)
BASIC_CFLAGS += -DBACKTRACE_SUPPORT
endif
endif
@ -891,10 +943,14 @@ $(OUTPUT)%.s: %.S
$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
'-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
'-DBINDIR="$(bindir_relative_SQ)"' \
'-DPREFIX="$(prefix_SQ)"' \
$<
$(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
'-DBINDIR="$(bindir_SQ)"' \
$<
$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
@ -910,6 +966,9 @@ $(OUTPUT)ui/browsers/hists.o: ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)ui/browsers/scripts.o: ui/browsers/scripts.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
@ -981,20 +1040,15 @@ help:
@echo 'Perf maintainer targets:'
@echo ' clean - clean all binary objects and build output'
doc:
$(MAKE) -C Documentation all
man:
$(MAKE) -C Documentation man
DOC_TARGETS := doc man html info pdf
html:
$(MAKE) -C Documentation html
INSTALL_DOC_TARGETS := $(patsubst %,install-%,$(DOC_TARGETS)) try-install-man
INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
info:
$(MAKE) -C Documentation info
pdf:
$(MAKE) -C Documentation pdf
# 'make doc' should call 'make -C Documentation all'
$(DOC_TARGETS):
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:doc=all)
TAGS:
$(RM) TAGS
@ -1045,7 +1099,7 @@ perfexec_instdir = $(prefix)/$(perfexecdir)
endif
perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
install: all
install: all try-install-man
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
@ -1061,33 +1115,17 @@ install: all
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
$(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
$(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
install-doc:
$(MAKE) -C Documentation install
install-man:
$(MAKE) -C Documentation install-man
install-html:
$(MAKE) -C Documentation install-html
install-info:
$(MAKE) -C Documentation install-info
install-pdf:
$(MAKE) -C Documentation install-pdf
quick-install-doc:
$(MAKE) -C Documentation quick-install
quick-install-man:
$(MAKE) -C Documentation quick-install-man
quick-install-html:
$(MAKE) -C Documentation quick-install-html
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) $(@:-doc=)
### Cleaning rules
@ -1095,7 +1133,7 @@ clean: $(LIBTRACEEVENT)-clean
$(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS)
$(RM) $(ALL_PROGRAMS) perf
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(MAKE) -C Documentation/ clean
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
$(RM) $(OUTPUT)util/*-bison*
$(RM) $(OUTPUT)util/*-flex*

211
tools/perf/arch/common.c Normal file
View File

@ -0,0 +1,211 @@
#include <stdio.h>
#include <sys/utsname.h>
#include "common.h"
#include "../util/debug.h"
const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
"arm-unknown-linux-",
"arm-unknown-linux-gnu-",
"arm-unknown-linux-gnueabi-",
NULL
};
const char *const powerpc_triplets[] = {
"powerpc-unknown-linux-gnu-",
"powerpc64-unknown-linux-gnu-",
NULL
};
const char *const s390_triplets[] = {
"s390-ibm-linux-",
NULL
};
const char *const sh_triplets[] = {
"sh-unknown-linux-gnu-",
"sh64-unknown-linux-gnu-",
NULL
};
const char *const sparc_triplets[] = {
"sparc-unknown-linux-gnu-",
"sparc64-unknown-linux-gnu-",
NULL
};
const char *const x86_triplets[] = {
"x86_64-pc-linux-gnu-",
"x86_64-unknown-linux-gnu-",
"i686-pc-linux-gnu-",
"i586-pc-linux-gnu-",
"i486-pc-linux-gnu-",
"i386-pc-linux-gnu-",
"i686-linux-android-",
"i686-android-linux-",
NULL
};
const char *const mips_triplets[] = {
"mips-unknown-linux-gnu-",
"mipsel-linux-android-",
NULL
};
static bool lookup_path(char *name)
{
bool found = false;
char *path, *tmp;
char buf[PATH_MAX];
char *env = getenv("PATH");
if (!env)
return false;
env = strdup(env);
if (!env)
return false;
path = strtok_r(env, ":", &tmp);
while (path) {
scnprintf(buf, sizeof(buf), "%s/%s", path, name);
if (access(buf, F_OK) == 0) {
found = true;
break;
}
path = strtok_r(NULL, ":", &tmp);
}
free(env);
return found;
}
static int lookup_triplets(const char *const *triplets, const char *name)
{
int i;
char buf[PATH_MAX];
for (i = 0; triplets[i] != NULL; i++) {
scnprintf(buf, sizeof(buf), "%s%s", triplets[i], name);
if (lookup_path(buf))
return i;
}
return -1;
}
/*
* Return architecture name in a normalized form.
* The conversion logic comes from the Makefile.
*/
static const char *normalize_arch(char *arch)
{
if (!strcmp(arch, "x86_64"))
return "x86";
if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
return "x86";
if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
return "sparc";
if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
return "arm";
if (!strncmp(arch, "s390", 4))
return "s390";
if (!strncmp(arch, "parisc", 6))
return "parisc";
if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
return "powerpc";
if (!strncmp(arch, "mips", 4))
return "mips";
if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
return "sh";
return arch;
}
static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
const char *name,
const char **path)
{
int idx;
const char *arch, *cross_env;
struct utsname uts;
const char *const *path_list;
char *buf = NULL;
arch = normalize_arch(env->arch);
if (uname(&uts) < 0)
goto out;
/*
* We don't need to try to find objdump path for native system.
* Just use default binutils path (e.g.: "objdump").
*/
if (!strcmp(normalize_arch(uts.machine), arch))
goto out;
cross_env = getenv("CROSS_COMPILE");
if (cross_env) {
if (asprintf(&buf, "%s%s", cross_env, name) < 0)
goto out_error;
if (buf[0] == '/') {
if (access(buf, F_OK) == 0)
goto out;
goto out_error;
}
if (lookup_path(buf))
goto out;
free(buf);
}
if (!strcmp(arch, "arm"))
path_list = arm_triplets;
else if (!strcmp(arch, "powerpc"))
path_list = powerpc_triplets;
else if (!strcmp(arch, "sh"))
path_list = sh_triplets;
else if (!strcmp(arch, "s390"))
path_list = s390_triplets;
else if (!strcmp(arch, "sparc"))
path_list = sparc_triplets;
else if (!strcmp(arch, "x86"))
path_list = x86_triplets;
else if (!strcmp(arch, "mips"))
path_list = mips_triplets;
else {
ui__error("binutils for %s not supported.\n", arch);
goto out_error;
}
idx = lookup_triplets(path_list, name);
if (idx < 0) {
ui__error("Please install %s for %s.\n"
"You can add it to PATH, set CROSS_COMPILE or "
"override the default using --%s.\n",
name, arch, name);
goto out_error;
}
if (asprintf(&buf, "%s%s", path_list[idx], name) < 0)
goto out_error;
out:
*path = buf;
return 0;
out_error:
free(buf);
*path = NULL;
return -1;
}
int perf_session_env__lookup_objdump(struct perf_session_env *env)
{
/*
* For live mode, env->arch will be NULL and we can use
* the native objdump tool.
*/
if (env->arch == NULL)
return 0;
return perf_session_env__lookup_binutils_path(env, "objdump",
&objdump_path);
}

10
tools/perf/arch/common.h Normal file
View File

@ -0,0 +1,10 @@
#ifndef ARCH_PERF_COMMON_H
#define ARCH_PERF_COMMON_H
#include "../util/session.h"
extern const char *objdump_path;
int perf_session_env__lookup_objdump(struct perf_session_env *env);
#endif /* ARCH_PERF_COMMON_H */

View File

@ -28,12 +28,12 @@
#include "util/hist.h"
#include "util/session.h"
#include "util/tool.h"
#include "arch/common.h"
#include <linux/bitmap.h>
struct perf_annotate {
struct perf_tool tool;
char const *input_name;
bool force, use_tui, use_stdio;
bool full_paths;
bool print_line;
@ -139,7 +139,7 @@ find_next:
}
if (use_browser > 0) {
key = hist_entry__tui_annotate(he, evidx, NULL, NULL, 0);
key = hist_entry__tui_annotate(he, evidx, NULL);
switch (key) {
case K_RIGHT:
next = rb_next(nd);
@ -174,7 +174,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
struct perf_evsel *pos;
u64 total_nr_samples;
session = perf_session__new(ann->input_name, O_RDONLY,
session = perf_session__new(input_name, O_RDONLY,
ann->force, false, &ann->tool);
if (session == NULL)
return -ENOMEM;
@ -186,6 +186,12 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out_delete;
}
if (!objdump_path) {
ret = perf_session_env__lookup_objdump(&session->header.env);
if (ret)
goto out_delete;
}
ret = perf_session__process_events(session, &ann->tool);
if (ret)
goto out_delete;
@ -246,13 +252,14 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.fork = perf_event__process_task,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.ordered_samples = true,
.ordering_requires_timestamps = true,
},
};
const struct option options[] = {
OPT_STRING('i', "input", &annotate.input_name, "file",
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),

View File

@ -13,6 +13,7 @@
#include "util/header.h"
#include "util/parse-options.h"
#include "util/strlist.h"
#include "util/build-id.h"
#include "util/symbol.h"
static int build_id_cache__add_file(const char *filename, const char *debugdir)

View File

@ -44,8 +44,7 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
return fprintf(fp, "%s\n", sbuild_id);
}
static int perf_session__list_build_ids(const char *input_name,
bool force, bool with_hits)
static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
@ -81,7 +80,6 @@ int cmd_buildid_list(int argc, const char **argv,
bool show_kernel = false;
bool with_hits = false;
bool force = false;
const char *input_name = NULL;
const struct option options[] = {
OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
@ -101,5 +99,5 @@ int cmd_buildid_list(int argc, const char **argv,
if (show_kernel)
return sysfs__fprintf_build_id(stdout);
return perf_session__list_build_ids(input_name, force, with_hits);
return perf_session__list_build_ids(force, with_hits);
}

View File

@ -24,6 +24,228 @@ static char const *input_old = "perf.data.old",
static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
static bool sort_compute;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
COMPUTE_WEIGHTED_DIFF,
COMPUTE_MAX,
};
const char *compute_names[COMPUTE_MAX] = {
[COMPUTE_DELTA] = "delta",
[COMPUTE_RATIO] = "ratio",
[COMPUTE_WEIGHTED_DIFF] = "wdiff",
};
static int compute;
static int setup_compute_opt_wdiff(char *opt)
{
char *w1_str = opt;
char *w2_str;
int ret = -EINVAL;
if (!opt)
goto out;
w2_str = strchr(opt, ',');
if (!w2_str)
goto out;
*w2_str++ = 0x0;
if (!*w2_str)
goto out;
compute_wdiff_w1 = strtol(w1_str, NULL, 10);
compute_wdiff_w2 = strtol(w2_str, NULL, 10);
if (!compute_wdiff_w1 || !compute_wdiff_w2)
goto out;
pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n",
compute_wdiff_w1, compute_wdiff_w2);
ret = 0;
out:
if (ret)
pr_err("Failed: wrong weight data, use 'wdiff:w1,w2'\n");
return ret;
}
static int setup_compute_opt(char *opt)
{
if (compute == COMPUTE_WEIGHTED_DIFF)
return setup_compute_opt_wdiff(opt);
if (opt) {
pr_err("Failed: extra option specified '%s'", opt);
return -EINVAL;
}
return 0;
}
static int setup_compute(const struct option *opt, const char *str,
int unset __maybe_unused)
{
int *cp = (int *) opt->value;
char *cstr = (char *) str;
char buf[50];
unsigned i;
char *option;
if (!str) {
*cp = COMPUTE_DELTA;
return 0;
}
if (*str == '+') {
sort_compute = true;
cstr = (char *) ++str;
if (!*str)
return 0;
}
option = strchr(str, ':');
if (option) {
unsigned len = option++ - str;
/*
* The str data are not writeable, so we need
* to use another buffer.
*/
/* No option value is longer than this buffer. */
if (len >= sizeof(buf))
return -EINVAL;
strncpy(buf, str, len);
buf[len] = 0x0;
cstr = buf;
}
for (i = 0; i < COMPUTE_MAX; i++)
if (!strcmp(cstr, compute_names[i])) {
*cp = i;
return setup_compute_opt(option);
}
pr_err("Failed: '%s' is not computation method "
"(use 'delta','ratio' or 'wdiff')\n", str);
return -EINVAL;
}
static double get_period_percent(struct hist_entry *he, u64 period)
{
u64 total = he->hists->stats.total_period;
return (period * 100.0) / total;
}
double perf_diff__compute_delta(struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_percent = get_period_percent(he, he->stat.period);
double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0;
he->diff.period_ratio_delta = new_percent - old_percent;
he->diff.computed = true;
return he->diff.period_ratio_delta;
}
double perf_diff__compute_ratio(struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_period = he->stat.period;
double old_period = pair ? pair->stat.period : 0;
he->diff.computed = true;
he->diff.period_ratio = pair ? (new_period / old_period) : 0;
return he->diff.period_ratio;
}
s64 perf_diff__compute_wdiff(struct hist_entry *he)
{
struct hist_entry *pair = hist_entry__next_pair(he);
u64 new_period = he->stat.period;
u64 old_period = pair ? pair->stat.period : 0;
he->diff.computed = true;
if (!pair)
he->diff.wdiff = 0;
else
he->diff.wdiff = new_period * compute_wdiff_w2 -
old_period * compute_wdiff_w1;
return he->diff.wdiff;
}
static int formula_delta(struct hist_entry *he, char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
if (!pair)
return -1;
return scnprintf(buf, size,
"(%" PRIu64 " * 100 / %" PRIu64 ") - "
"(%" PRIu64 " * 100 / %" PRIu64 ")",
he->stat.period, he->hists->stats.total_period,
pair->stat.period, pair->hists->stats.total_period);
}
static int formula_ratio(struct hist_entry *he, char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
double new_period = he->stat.period;
double old_period = pair ? pair->stat.period : 0;
if (!pair)
return -1;
return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
}
static int formula_wdiff(struct hist_entry *he, char *buf, size_t size)
{
struct hist_entry *pair = hist_entry__next_pair(he);
u64 new_period = he->stat.period;
u64 old_period = pair ? pair->stat.period : 0;
if (!pair)
return -1;
return scnprintf(buf, size,
"(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
}
int perf_diff__formula(char *buf, size_t size, struct hist_entry *he)
{
switch (compute) {
case COMPUTE_DELTA:
return formula_delta(he, buf, size);
case COMPUTE_RATIO:
return formula_ratio(he, buf, size);
case COMPUTE_WEIGHTED_DIFF:
return formula_wdiff(he, buf, size);
default:
BUG_ON(1);
}
return -1;
}
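
To make the three computations above concrete, here is a small standalone sketch (not part of this patch) that evaluates them on made-up period and total-period values; the weights correspond to a hypothetical '-c wdiff:1,-1' invocation, and the real code pulls all of these from the paired hist entries as shown above.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    /* Hypothetical periods: baseline ("old") perf.data vs. new perf.data. */
    uint64_t old_period = 2000, old_total = 10000;
    uint64_t new_period = 3000, new_total = 12000;

    /* delta: difference of the two percentages (perf_diff__compute_delta). */
    double delta = (new_period * 100.0 / new_total) -
                   (old_period * 100.0 / old_total);

    /* ratio: quotient of the raw periods (perf_diff__compute_ratio). */
    double ratio = (double)new_period / (double)old_period;

    /* wdiff: weighted difference with user-supplied weights, here
     * w1 = 1, w2 = -1 as if given via -c wdiff:1,-1
     * (perf_diff__compute_wdiff). */
    int64_t w1 = 1, w2 = -1;
    int64_t wdiff = (int64_t)new_period * w2 - (int64_t)old_period * w1;

    printf("delta = %+.2f%%, ratio = %.2f, wdiff = %" PRId64 "\n",
           delta, ratio, wdiff);
    return 0;
}
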
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
@ -47,7 +269,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
if (al.filtered || al.sym == NULL)
if (al.filtered)
return 0;
if (hists__add_entry(&evsel->hists, &al, sample->period)) {
@ -63,8 +285,8 @@ static struct perf_tool tool = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.exit = perf_event__process_task,
.fork = perf_event__process_task,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.ordered_samples = true,
.ordering_requires_timestamps = true,
@ -112,36 +334,6 @@ static void hists__name_resort(struct hists *self, bool sort)
self->entries = tmp;
}
static struct hist_entry *hists__find_entry(struct hists *self,
struct hist_entry *he)
{
struct rb_node *n = self->entries.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
int64_t cmp = hist_entry__cmp(he, iter);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return iter;
}
return NULL;
}
static void hists__match(struct hists *older, struct hists *newer)
{
struct rb_node *nd;
for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) {
struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
pos->pair = hists__find_entry(older, pos);
}
}
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
struct perf_evlist *evlist)
{
@ -172,6 +364,144 @@ static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name)
}
}
static void hists__baseline_only(struct hists *hists)
{
struct rb_node *next = rb_first(&hists->entries);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&he->rb_node);
if (!hist_entry__next_pair(he)) {
rb_erase(&he->rb_node, &hists->entries);
hist_entry__free(he);
}
}
}
static void hists__precompute(struct hists *hists)
{
struct rb_node *next = rb_first(&hists->entries);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&he->rb_node);
switch (compute) {
case COMPUTE_DELTA:
perf_diff__compute_delta(he);
break;
case COMPUTE_RATIO:
perf_diff__compute_ratio(he);
break;
case COMPUTE_WEIGHTED_DIFF:
perf_diff__compute_wdiff(he);
break;
default:
BUG_ON(1);
}
}
}
static int64_t cmp_doubles(double l, double r)
{
if (l > r)
return -1;
else if (l < r)
return 1;
else
return 0;
}
static int64_t
hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c)
{
switch (c) {
case COMPUTE_DELTA:
{
double l = left->diff.period_ratio_delta;
double r = right->diff.period_ratio_delta;
return cmp_doubles(l, r);
}
case COMPUTE_RATIO:
{
double l = left->diff.period_ratio;
double r = right->diff.period_ratio;
return cmp_doubles(l, r);
}
case COMPUTE_WEIGHTED_DIFF:
{
s64 l = left->diff.wdiff;
s64 r = right->diff.wdiff;
return r - l;
}
default:
BUG_ON(1);
}
return 0;
}
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__cmp_compute(he, iter, c) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
}
static void hists__compute_resort(struct hists *hists)
{
struct rb_root tmp = RB_ROOT;
struct rb_node *next = rb_first(&hists->entries);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&he->rb_node);
rb_erase(&he->rb_node, &hists->entries);
insert_hist_entry_by_compute(&tmp, he, compute);
}
hists->entries = tmp;
}
static void hists__process(struct hists *old, struct hists *new)
{
hists__match(new, old);
if (show_baseline_only)
hists__baseline_only(new);
else
hists__link(new, old);
if (sort_compute) {
hists__precompute(new);
hists__compute_resort(new);
}
hists__fprintf(new, true, 0, 0, stdout);
}
static int __cmd_diff(void)
{
int ret, i;
@ -213,8 +543,7 @@ static int __cmd_diff(void)
first = false;
hists__match(&evsel_old->hists, &evsel->hists);
hists__fprintf(&evsel->hists, true, 0, 0, stdout);
hists__process(&evsel_old->hists, &evsel->hists);
}
out_delete:
@ -235,6 +564,16 @@ static const struct option options[] = {
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('M', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
"Show only items with match in baseline"),
OPT_CALLBACK('c', "compute", &compute,
"delta,ratio,wdiff:w1,w2 (default delta)",
"Entries differential computation selection",
setup_compute),
OPT_BOOLEAN('p', "period", &show_period,
"Show period values."),
OPT_BOOLEAN('F', "formula", &show_formula,
"Show formula."),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
@ -263,12 +602,36 @@ static void ui_init(void)
/* No overhead column. */
perf_hpp__column_enable(PERF_HPP__OVERHEAD, false);
/* Display baseline/delta/displacement columns. */
/*
* Display baseline/delta/ratio/displacement/
* formula/periods columns.
*/
perf_hpp__column_enable(PERF_HPP__BASELINE, true);
perf_hpp__column_enable(PERF_HPP__DELTA, true);
switch (compute) {
case COMPUTE_DELTA:
perf_hpp__column_enable(PERF_HPP__DELTA, true);
break;
case COMPUTE_RATIO:
perf_hpp__column_enable(PERF_HPP__RATIO, true);
break;
case COMPUTE_WEIGHTED_DIFF:
perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true);
break;
default:
BUG_ON(1);
};
if (show_displacement)
perf_hpp__column_enable(PERF_HPP__DISPL, true);
if (show_formula)
perf_hpp__column_enable(PERF_HPP__FORMULA, true);
if (show_period) {
perf_hpp__column_enable(PERF_HPP__PERIOD, true);
perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true);
}
}
int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)

View File

@ -48,12 +48,12 @@ static int __if_print(bool *first, const char *field, u64 value)
#define if_print(field) __if_print(&first, #field, pos->attr.field)
static int __cmd_evlist(const char *input_name, struct perf_attr_details *details)
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
struct perf_session *session;
struct perf_evsel *pos;
session = perf_session__new(input_name, O_RDONLY, 0, false, NULL);
session = perf_session__new(file_name, O_RDONLY, 0, false, NULL);
if (session == NULL)
return -ENOMEM;
@ -111,7 +111,6 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail
int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_attr_details details = { .verbose = false, };
const char *input_name = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file", "Input file name"),
OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),

View File

@ -8,33 +8,53 @@
#include "builtin.h"
#include "perf.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/parse-options.h"
#include <linux/list.h>
struct perf_inject {
struct perf_tool tool;
bool build_ids;
bool sched_stat;
const char *input_name;
int pipe_output,
output;
u64 bytes_written;
struct list_head samples;
};
static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused,
struct event_entry {
struct list_head node;
u32 tid;
union perf_event event[0];
};
static int perf_event__repipe_synth(struct perf_tool *tool,
union perf_event *event,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
uint32_t size;
void *buf = event;
size = event->header.size;
while (size) {
int ret = write(STDOUT_FILENO, buf, size);
int ret = write(inject->output, buf, size);
if (ret < 0)
return -errno;
size -= ret;
buf += ret;
inject->bytes_written += ret;
}
return 0;
@ -80,12 +100,25 @@ static int perf_event__repipe(struct perf_tool *tool,
return perf_event__repipe_synth(tool, event, machine);
}
typedef int (*inject_handler)(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine);
static int perf_event__repipe_sample(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
if (evsel->handler.func) {
inject_handler f = evsel->handler.func;
return f(tool, event, sample, evsel, machine);
}
build_id__mark_dso_hit(tool, event, sample, evsel, machine);
return perf_event__repipe_synth(tool, event, machine);
}
@ -102,14 +135,14 @@ static int perf_event__repipe_mmap(struct perf_tool *tool,
return err;
}
static int perf_event__repipe_task(struct perf_tool *tool,
static int perf_event__repipe_fork(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
int err;
err = perf_event__process_task(tool, event, sample, machine);
err = perf_event__process_fork(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
return err;
@ -210,6 +243,80 @@ repipe:
return 0;
}
static int perf_inject__sched_process_exit(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
struct event_entry *ent;
list_for_each_entry(ent, &inject->samples, node) {
if (sample->tid == ent->tid) {
list_del_init(&ent->node);
free(ent);
break;
}
}
return 0;
}
static int perf_inject__sched_switch(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
struct event_entry *ent;
perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
ent = malloc(event->header.size + sizeof(struct event_entry));
if (ent == NULL) {
color_fprintf(stderr, PERF_COLOR_RED,
"Not enough memory to process sched switch event!");
return -1;
}
ent->tid = sample->tid;
memcpy(&ent->event, event, event->header.size);
list_add(&ent->node, &inject->samples);
return 0;
}
static int perf_inject__sched_stat(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct event_entry *ent;
union perf_event *event_sw;
struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u32 pid = perf_evsel__intval(evsel, sample, "pid");
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
goto found;
}
return 0;
found:
event_sw = &ent->event[0];
perf_evsel__parse_sample(evsel, event_sw, &sample_sw);
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
&sample_sw, false);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
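
The three handlers above implement a stash-and-replay scheme: sched_switch samples are stashed per tid, a later sched_stat_* sample for the same tid replays the stashed switch with the stat's time and period, and sched_process_exit drops the stash. The toy standalone model below (not part of this patch) illustrates only that bookkeeping; its fixed-size table, struct and function names are invented for the example and are deliberately simpler than the linked-list code above.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a stashed sched_switch sample. */
struct toy_stash { int tid; char where[32]; };

#define MAX_STASH 16
static struct toy_stash *stash[MAX_STASH];

static void toy_sched_switch(int tid, const char *where)
{
    for (int i = 0; i < MAX_STASH; i++) {
        if (!stash[i]) {
            stash[i] = malloc(sizeof(*stash[i]));
            stash[i]->tid = tid;
            snprintf(stash[i]->where, sizeof(stash[i]->where), "%s", where);
            return;
        }
    }
}

static void toy_sched_stat(int tid, unsigned long long sleep_ns)
{
    for (int i = 0; i < MAX_STASH; i++) {
        if (stash[i] && stash[i]->tid == tid) {
            /* "Replay" the stashed switch, annotated with the stat data. */
            printf("tid %d slept %llu ns around '%s'\n",
                   tid, sleep_ns, stash[i]->where);
            return;
        }
    }
}

static void toy_sched_process_exit(int tid)
{
    for (int i = 0; i < MAX_STASH; i++) {
        if (stash[i] && stash[i]->tid == tid) {
            free(stash[i]);
            stash[i] = NULL;
        }
    }
}

int main(void)
{
    toy_sched_switch(42, "context switch #1");
    toy_sched_stat(42, 1500000ULL);
    toy_sched_process_exit(42);
    return 0;
}
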
extern volatile int session_done;
static void sig_handler(int sig __maybe_unused)
@ -217,6 +324,21 @@ static void sig_handler(int sig __maybe_unused)
session_done = 1;
}
static int perf_evsel__check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg)
{
struct perf_event_attr *attr = &evsel->attr;
const char *name = perf_evsel__name(evsel);
if (!(attr->sample_type & sample_type)) {
pr_err("Samples for %s event do not have %s attribute set.",
name, sample_msg);
return -EINVAL;
}
return 0;
}
static int __cmd_inject(struct perf_inject *inject)
{
struct perf_session *session;
@ -224,19 +346,48 @@ static int __cmd_inject(struct perf_inject *inject)
signal(SIGINT, sig_handler);
if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
if (inject->build_ids || inject->sched_stat) {
inject->tool.mmap = perf_event__repipe_mmap;
inject->tool.fork = perf_event__repipe_task;
inject->tool.fork = perf_event__repipe_fork;
inject->tool.tracing_data = perf_event__repipe_tracing_data;
}
session = perf_session__new("-", O_RDONLY, false, true, &inject->tool);
session = perf_session__new(inject->input_name, O_RDONLY, false, true, &inject->tool);
if (session == NULL)
return -ENOMEM;
if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
struct perf_evsel *evsel;
inject->tool.ordered_samples = true;
list_for_each_entry(evsel, &session->evlist->entries, node) {
const char *name = perf_evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch")) {
if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
return -EINVAL;
evsel->handler.func = perf_inject__sched_switch;
} else if (!strcmp(name, "sched:sched_process_exit"))
evsel->handler.func = perf_inject__sched_process_exit;
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler.func = perf_inject__sched_stat;
}
}
if (!inject->pipe_output)
lseek(inject->output, session->header.data_offset, SEEK_SET);
ret = perf_session__process_events(session, &inject->tool);
if (!inject->pipe_output) {
session->header.data_size = inject->bytes_written;
perf_session__write_header(session, session->evlist, inject->output, true);
}
perf_session__delete(session);
return ret;
@ -260,10 +411,20 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.tracing_data = perf_event__repipe_tracing_data_synth,
.build_id = perf_event__repipe_op2_synth,
},
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
};
const char *output_name = "-";
const struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
OPT_STRING('o', "output", &output_name, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
"where and how long tasks slept"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show build ids, etc)"),
OPT_END()
@ -281,6 +442,18 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
if (argc)
usage_with_options(inject_usage, options);
if (!strcmp(output_name, "-")) {
inject.pipe_output = 1;
inject.output = STDOUT_FILENO;
} else {
inject.output = open(output_name, O_CREAT | O_WRONLY | O_TRUNC,
S_IRUSR | S_IWUSR);
if (inject.output < 0) {
perror("failed to create output file");
return -1;
}
}
if (symbol__init() < 0)
return -1;

View File

@ -477,7 +477,7 @@ static void sort_result(void)
__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
static int __cmd_kmem(const char *input_name)
static int __cmd_kmem(void)
{
int err = -EINVAL;
struct perf_session *session;
@ -743,7 +743,6 @@ static int __cmd_record(int argc, const char **argv)
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const default_sort_order = "frag,hit,bytes";
const char *input_name = NULL;
const struct option kmem_options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
@ -779,7 +778,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
if (list_empty(&alloc_sort))
setup_sorting(&alloc_sort, default_sort_order);
return __cmd_kmem(input_name);
return __cmd_kmem();
} else
usage_with_options(kmem_usage, kmem_options);

View File

@ -314,9 +314,9 @@ struct vcpu_event_record {
static void init_kvm_event_record(struct perf_kvm_stat *kvm)
{
int i;
unsigned int i;
for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++)
for (i = 0; i < EVENTS_CACHE_SIZE; i++)
INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
}
@ -370,9 +370,10 @@ static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
BUG_ON(key->key == INVALID_KEY);
head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
list_for_each_entry(event, head, hash_entry)
list_for_each_entry(event, head, hash_entry) {
if (event->key.key == key->key && event->key.info == key->info)
return event;
}
event = kvm_alloc_init_event(key);
if (!event)
@ -417,7 +418,10 @@ static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
u64 time_diff)
{
kvm_update_event_stats(&event->total, time_diff);
if (vcpu_id == -1) {
kvm_update_event_stats(&event->total, time_diff);
return true;
}
if (!kvm_event_expand(event, vcpu_id))
return false;
@ -433,6 +437,12 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
{
struct kvm_event *event;
u64 time_begin, time_diff;
int vcpu;
if (kvm->trace_vcpu == -1)
vcpu = -1;
else
vcpu = vcpu_record->vcpu_id;
event = vcpu_record->last_event;
time_begin = vcpu_record->start_time;
@ -462,7 +472,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
BUG_ON(timestamp < time_begin);
time_diff = timestamp - time_begin;
return update_kvm_event(event, vcpu_record->vcpu_id, time_diff);
return update_kvm_event(event, vcpu, time_diff);
}
static
@ -499,6 +509,11 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
if (!vcpu_record)
return true;
/* only process events for vcpus user cares about */
if ((kvm->trace_vcpu != -1) &&
(kvm->trace_vcpu != vcpu_record->vcpu_id))
return true;
if (kvm->events_ops->is_begin_event(evsel, sample, &key))
return handle_begin_event(kvm, vcpu_record, &key, sample->time);
@ -598,13 +613,15 @@ static void sort_result(struct perf_kvm_stat *kvm)
int vcpu = kvm->trace_vcpu;
struct kvm_event *event;
for (i = 0; i < EVENTS_CACHE_SIZE; i++)
list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry)
for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
if (event_is_valid(event, vcpu)) {
update_total_count(kvm, event);
insert_to_result(&kvm->result, event,
kvm->compare, vcpu);
}
}
}
}
/* returns left most element of result, and erase it */
@ -661,8 +678,8 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("\n");
}
pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n",
(unsigned long long)kvm->total_count, kvm->total_time / 1e3);
pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
kvm->total_count, kvm->total_time / 1e3);
}
static int process_sample_event(struct perf_tool *tool,

View File

@ -335,8 +335,6 @@ alloc_failed:
return NULL;
}
static const char *input_name;
struct trace_lock_handler {
int (*acquire_event)(struct perf_evsel *evsel,
struct perf_sample *sample);

View File

@ -31,6 +31,38 @@
#include <sched.h>
#include <sys/mman.h>
#ifndef HAVE_ON_EXIT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))
static int on_exit(on_exit_func_t function, void *arg)
{
if (__on_exit_count == ATEXIT_MAX)
return -ENOMEM;
else if (__on_exit_count == 0)
atexit(__handle_on_exit_funcs);
__on_exit_funcs[__on_exit_count] = function;
__on_exit_args[__on_exit_count++] = arg;
return 0;
}
static void __handle_on_exit_funcs(void)
{
int i;
for (i = 0; i < __on_exit_count; i++)
__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif
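
For reference, a minimal standalone program (not part of this patch) showing the on_exit() semantics this fallback emulates on systems that lack it: the handler receives the process exit status plus a user argument. Whether HAVE_ON_EXIT is set is decided by the SOURCE_ON_EXIT feature test further down in this series; the handler name and argument here are purely illustrative.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Illustrative cleanup handler: gets the exit status and a user argument. */
static void cleanup(int status, void *arg)
{
    fprintf(stderr, "exiting with status %d (arg: %s)\n",
            status, (const char *)arg);
}

int main(void)
{
    /* With glibc this is the real on_exit(); with the fallback above it is
     * the array-backed emulation plus the exit() wrapper macro. */
    on_exit(cleanup, "example arg");
    exit(3);
}
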
enum write_mode_t {
WRITE_FORCE,
WRITE_APPEND
@ -198,11 +230,15 @@ static int perf_record__open(struct perf_record *rec)
struct perf_record_opts *opts = &rec->opts;
int rc = 0;
perf_evlist__config_attrs(evlist, opts);
/*
* Set the evsel leader links before we configure attributes,
* since some might depend on this info.
*/
if (opts->group)
perf_evlist__set_leader(evlist);
perf_evlist__config_attrs(evlist, opts);
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
/*
@ -285,6 +321,11 @@ try_again:
perf_evsel__name(pos));
rc = -err;
goto out;
} else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
ui__error("\'precise\' request may not be supported. "
"Try removing 'p' modifier\n");
rc = -err;
goto out;
}
printf("\n");
@ -326,7 +367,8 @@ try_again:
"or try again with a smaller value of -m/--mmap_pages.\n"
"(current value: %d)\n", opts->mmap_pages);
rc = -errno;
} else if (!is_power_of_2(opts->mmap_pages)) {
} else if (!is_power_of_2(opts->mmap_pages) &&
(opts->mmap_pages != UINT_MAX)) {
pr_err("--mmap_pages/-m value must be a power of two.");
rc = -EINVAL;
} else {
@ -460,6 +502,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
struct perf_evlist *evsel_list = rec->evlist;
const char *output_name = rec->output_name;
struct perf_session *session;
bool disabled = false;
rec->progname = argv[0];
@ -659,7 +702,13 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
}
}
perf_evlist__enable(evsel_list);
/*
* When perf is starting the traced process, all the events
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
if (!perf_target__none(&opts->target))
perf_evlist__enable(evsel_list);
/*
* Let the child rip
@ -682,8 +731,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
waking++;
}
if (done)
/*
* When perf is starting the traced process, at the end events
* die with the process and we wait for that. Thus no need to
* disable events in this case.
*/
if (done && !disabled && !perf_target__none(&opts->target)) {
perf_evlist__disable(evsel_list);
disabled = true;
}
}
if (quiet || signr == SIGUSR1)

View File

@ -33,13 +33,13 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
#include "arch/common.h"
#include <linux/bitmap.h>
struct perf_report {
struct perf_tool tool;
struct perf_session *session;
char const *input_name;
bool force, use_tui, use_gtk, use_stdio;
bool hide_unresolved;
bool dont_use_callchains;
@ -428,10 +428,11 @@ static int __cmd_report(struct perf_report *rep)
if (use_browser > 0) {
if (use_browser == 1) {
perf_evlist__tui_browse_hists(session->evlist, help,
NULL, NULL, 0);
NULL,
&session->header.env);
} else if (use_browser == 2) {
perf_evlist__gtk_browse_hists(session->evlist, help,
NULL, NULL, 0);
NULL);
}
} else
perf_evlist__tty_browse_hists(session->evlist, rep, help);
@ -556,8 +557,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.exit = perf_event__process_task,
.fork = perf_event__process_task,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.read = process_read_event,
.attr = perf_event__process_attr,
@ -570,7 +571,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.pretty_printing_style = "normal",
};
const struct option options[] = {
OPT_STRING('i', "input", &report.input_name, "file",
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
@ -656,13 +657,13 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (!report.input_name || !strlen(report.input_name)) {
if (!input_name || !strlen(input_name)) {
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
report.input_name = "-";
input_name = "-";
else
report.input_name = "perf.data";
input_name = "perf.data";
}
session = perf_session__new(report.input_name, O_RDONLY,
session = perf_session__new(input_name, O_RDONLY,
report.force, false, &report.tool);
if (session == NULL)
return -ENOMEM;
@ -687,7 +688,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
}
if (strcmp(report.input_name, "-") != 0)
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else {
use_browser = 0;

View File

@ -120,7 +120,6 @@ struct trace_sched_handler {
struct perf_sched {
struct perf_tool tool;
const char *input_name;
const char *sort_order;
unsigned long nr_tasks;
struct task_desc *pid_to_task[MAX_PID];
@ -1460,7 +1459,7 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
};
struct perf_session *session;
session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool);
session = perf_session__new(input_name, O_RDONLY, 0, false, &sched->tool);
if (session == NULL) {
pr_debug("No Memory for session\n");
return -1;
@ -1672,7 +1671,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.sample = perf_sched__process_tracepoint_sample,
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_event__process_task,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.ordered_samples = true,
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
@ -1707,7 +1707,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_END()
};
const struct option sched_options[] = {
OPT_STRING('i', "input", &sched.input_name, "file",
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),

View File

@ -520,8 +520,8 @@ static struct perf_tool perf_script = {
.sample = process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.exit = perf_event__process_task,
.fork = perf_event__process_task,
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.attr = perf_event__process_attr,
.event_type = perf_event__process_event_type,
.tracing_data = perf_event__process_tracing_data,
@ -1029,6 +1029,68 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
exit(0);
}
/*
* Some scripts specify the events they require in their "xxx-record" file;
* this function checks whether the events in perf.data match those
* mentioned in the "xxx-record".
*
* FIXME: all existing "xxx-record" files use the simple "-e event" format,
* which is covered here. New parsing code will be needed for future,
* more complex formats such as event groups.
*/
static int check_ev_match(char *dir_name, char *scriptname,
struct perf_session *session)
{
char filename[MAXPATHLEN], evname[128];
char line[BUFSIZ], *p;
struct perf_evsel *pos;
int match, len;
FILE *fp;
sprintf(filename, "%s/bin/%s-record", dir_name, scriptname);
fp = fopen(filename, "r");
if (!fp)
return -1;
while (fgets(line, sizeof(line), fp)) {
p = ltrim(line);
if (*p == '#')
continue;
while (strlen(p)) {
p = strstr(p, "-e");
if (!p)
break;
p += 2;
p = ltrim(p);
len = strcspn(p, " \t");
if (!len)
break;
snprintf(evname, len + 1, "%s", p);
match = 0;
list_for_each_entry(pos,
&session->evlist->entries, node) {
if (!strcmp(perf_evsel__name(pos), evname)) {
match = 1;
break;
}
}
if (!match) {
fclose(fp);
return -1;
}
}
}
fclose(fp);
return 0;
}
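
To make the parsing above concrete: a "<script>-record" file is essentially a 'perf record' command line, and the loop extracts each event name that follows a '-e' option. The standalone snippet below (not part of this patch) runs the same extraction over one made-up record line; the local skip_spaces_() helper stands in for perf's ltrim(), which is not shown in this diff.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Trivial stand-in for perf's ltrim(). */
static char *skip_spaces_(char *p)
{
    while (isspace((unsigned char)*p))
        p++;
    return p;
}

int main(void)
{
    /* Hypothetical contents of a "<script>-record" file. */
    char line[] = "perf record -e sched:sched_switch -e sched:sched_stat_sleep $@";
    char evname[128];
    char *p = skip_spaces_(line);

    while (strlen(p)) {
        p = strstr(p, "-e");
        if (!p)
            break;
        p = skip_spaces_(p + 2);
        size_t len = strcspn(p, " \t");
        if (!len)
            break;
        /* Same copy-out of the event name as in check_ev_match(). */
        snprintf(evname, len + 1, "%s", p);
        printf("required event: %s\n", evname);
        p += len;
    }
    return 0;
}
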
/*
* Return -1 if none is found, otherwise the actual scripts number.
*
@ -1039,17 +1101,23 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
int find_scripts(char **scripts_array, char **scripts_path_array)
{
struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
char scripts_path[MAXPATHLEN];
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char lang_path[MAXPATHLEN];
struct perf_session *session;
char *temp;
int i = 0;
session = perf_session__new(input_name, O_RDONLY, 0, false, NULL);
if (!session)
return -1;
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
scripts_dir = opendir(scripts_path);
if (!scripts_dir)
if (!scripts_dir) {
perf_session__delete(session);
return -1;
}
for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
@ -1077,10 +1145,18 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
snprintf(scripts_array[i],
(temp - script_dirent.d_name) + 1,
"%s", script_dirent.d_name);
if (check_ev_match(lang_path,
scripts_array[i], session))
continue;
i++;
}
closedir(lang_dir);
}
closedir(scripts_dir);
perf_session__delete(session);
return i;
}
@ -1175,7 +1251,6 @@ static int have_cmd(int argc, const char **argv)
int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool show_full_info = false;
const char *input_name = NULL;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;

View File

@ -57,6 +57,7 @@
#include "util/thread.h"
#include "util/thread_map.h"
#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>
@ -83,6 +84,9 @@ static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
static FILE *output = NULL;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static volatile int done = 0;
@ -125,8 +129,7 @@ static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_evsel *first)
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
bool exclude_guest_missing = false;
@ -149,7 +152,8 @@ retry:
return 0;
}
if (!perf_target__has_task(&target) && (!group || evsel == first)) {
if (!perf_target__has_task(&target) &&
!perf_evsel__is_group_member(evsel)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
@ -265,10 +269,10 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
static int run_perf_stat(int argc __maybe_unused, const char **argv)
static int __run_perf_stat(int argc __maybe_unused, const char **argv)
{
unsigned long long t0, t1;
struct perf_evsel *counter, *first;
struct perf_evsel *counter;
int status = 0;
int child_ready_pipe[2], go_pipe[2];
const bool forks = (argc > 0);
@ -328,10 +332,8 @@ static int run_perf_stat(int argc __maybe_unused, const char **argv)
if (group)
perf_evlist__set_leader(evsel_list);
first = perf_evlist__first(evsel_list);
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
if (create_perf_stat_counter(counter) < 0) {
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
@ -405,6 +407,32 @@ static int run_perf_stat(int argc __maybe_unused, const char **argv)
return WEXITSTATUS(status);
}
static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
int ret;
if (pre_cmd) {
ret = system(pre_cmd);
if (ret)
return ret;
}
if (sync_run)
sync();
ret = __run_perf_stat(argc, argv);
if (ret)
return ret;
if (post_cmd) {
ret = system(post_cmd);
if (ret)
return ret;
}
return ret;
}
static void print_noise_pct(double total, double avg)
{
double pct = rel_stddev_stats(total, avg);
@ -1069,8 +1097,7 @@ static int add_default_attributes(void)
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
bool append_file = false,
sync_run = false;
bool append_file = false;
int output_fd = 0;
const char *output_name = NULL;
const struct option options[] = {
@ -1114,6 +1141,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
OPT_INTEGER(0, "log-fd", &output_fd,
"log output to fd, instead of stderr"),
OPT_STRING(0, "pre", &pre_cmd, "command",
"command to run prior to the measured command"),
OPT_STRING(0, "post", &post_cmd, "command",
"command to run after to the measured command"),
OPT_END()
};
const char * const stat_usage[] = {
@ -1238,9 +1269,6 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
if (sync_run)
sync();
status = run_perf_stat(argc, argv);
}

File diff suppressed because it is too large

View File

@ -965,7 +965,7 @@ static void write_svg_file(const char *filename)
svg_close();
}
static int __cmd_timechart(const char *input_name, const char *output_name)
static int __cmd_timechart(const char *output_name)
{
struct perf_tool perf_timechart = {
.comm = process_comm_event,
@ -1061,7 +1061,6 @@ parse_process(const struct option *opt __maybe_unused, const char *arg,
int cmd_timechart(int argc, const char **argv,
const char *prefix __maybe_unused)
{
const char *input_name;
const char *output_name = "output.svg";
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file", "input file name"),
@ -1092,5 +1091,5 @@ int cmd_timechart(int argc, const char **argv,
setup_pager();
return __cmd_timechart(input_name, output_name);
return __cmd_timechart(output_name);
}

View File

@ -26,6 +26,7 @@
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
@ -581,6 +582,11 @@ static void *display_thread_tui(void *arg)
struct perf_evsel *pos;
struct perf_top *top = arg;
const char *help = "For a higher level overview, try: perf top --sort comm,dso";
struct hist_browser_timer hbt = {
.timer = perf_top__sort_new_samples,
.arg = top,
.refresh = top->delay_secs,
};
perf_top__sort_new_samples(top);
@ -592,9 +598,8 @@ static void *display_thread_tui(void *arg)
list_for_each_entry(pos, &top->evlist->entries, node)
pos->hists.uid_filter_str = top->target.uid_str;
perf_evlist__tui_browse_hists(top->evlist, help,
perf_top__sort_new_samples,
top, top->delay_secs);
perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
&top->session->header.env);
exit_browser(0);
exit(0);
@ -871,7 +876,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
&sample, machine);
} else if (event->header.type < PERF_RECORD_MAX) {
hists__inc_nr_events(&evsel->hists, event->header.type);
perf_event__process(&top->tool, event, &sample, machine);
machine__process_event(machine, event);
} else
++session->hists.stats.nr_unknown_events;
}
@ -976,6 +981,10 @@ try_again:
ui__error("Too many events are opened.\n"
"Try again after reducing the number of events\n");
goto out_err;
} else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
ui__error("\'precise\' request may not be supported. "
"Try removing 'p' modifier\n");
goto out_err;
}
ui__error("The sys_perf_event_open() syscall "

View File

@ -1,5 +1,8 @@
#include "builtin.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
#include "util/thread_map.h"
#include "event-parse.h"
@ -13,15 +16,18 @@ static struct syscall_fmt {
bool errmsg;
bool timeout;
} syscall_fmts[] = {
{ .name = "access", .errmsg = true, },
{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
{ .name = "fstat", .errmsg = true, .alias = "newfstat", },
{ .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
{ .name = "futex", .errmsg = true, },
{ .name = "open", .errmsg = true, },
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
{ .name = "read", .errmsg = true, },
{ .name = "recvfrom", .errmsg = true, },
{ .name = "select", .errmsg = true, .timeout = true, },
{ .name = "socket", .errmsg = true, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
};
@ -43,6 +49,57 @@ struct syscall {
struct syscall_fmt *fmt;
};
static size_t fprintf_duration(unsigned long t, FILE *fp)
{
double duration = (double)t / NSEC_PER_MSEC;
size_t printed = fprintf(fp, "(");
if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
else
printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
return printed + fprintf(stdout, "): ");
}
struct thread_trace {
u64 entry_time;
u64 exit_time;
bool entry_pending;
unsigned long nr_events;
char *entry_str;
double runtime_ms;
};
static struct thread_trace *thread_trace__new(void)
{
return zalloc(sizeof(struct thread_trace));
}
static struct thread_trace *thread__trace(struct thread *thread)
{
struct thread_trace *ttrace;
if (thread == NULL)
goto fail;
if (thread->priv == NULL)
thread->priv = thread_trace__new();
if (thread->priv == NULL)
goto fail;
ttrace = thread->priv;
++ttrace->nr_events;
return ttrace;
fail:
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: not enough memory, dropping samples!\n");
return NULL;
}
struct trace {
int audit_machine;
struct {
@ -50,8 +107,96 @@ struct trace {
struct syscall *table;
} syscalls;
struct perf_record_opts opts;
struct machine host;
u64 base_time;
unsigned long nr_events;
bool sched;
bool multiple_threads;
double duration_filter;
double runtime_ms;
};
static bool trace__filter_duration(struct trace *trace, double t)
{
return t < (trace->duration_filter * NSEC_PER_MSEC);
}
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
return fprintf(fp, "%10.3f ", ts);
}
static bool done = false;
static void sig_handler(int sig __maybe_unused)
{
done = true;
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
u64 duration, u64 tstamp, FILE *fp)
{
size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
printed += fprintf_duration(duration, fp);
if (trace->multiple_threads)
printed += fprintf(fp, "%d ", thread->pid);
return printed;
}
static int trace__process_event(struct machine *machine, union perf_event *event)
{
int ret = 0;
switch (event->header.type) {
case PERF_RECORD_LOST:
color_fprintf(stdout, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event);
default:
ret = machine__process_event(machine, event);
break;
}
return ret;
}
static int trace__tool_process(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
return trace__process_event(machine, event);
}
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
int err = symbol__init();
if (err)
return err;
machine__init(&trace->host, "", HOST_KERNEL_ID);
machine__create_kernel_maps(&trace->host);
if (perf_target__has_task(&trace->opts.target)) {
err = perf_event__synthesize_thread_map(NULL, evlist->threads,
trace__tool_process,
&trace->host);
} else {
err = perf_event__synthesize_threads(NULL, trace__tool_process,
&trace->host);
}
if (err)
symbol__exit();
return err;
}
static int trace__read_syscall_info(struct trace *trace, int id)
{
char tp_name[128];
@ -93,7 +238,8 @@ static int trace__read_syscall_info(struct trace *trace, int id)
return sc->tp_format != NULL ? 0 : -1;
}
static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp)
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
unsigned long *args)
{
int i = 0;
size_t printed = 0;
@ -102,12 +248,15 @@ static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FIL
struct format_field *field;
for (field = sc->tp_format->format.fields->next; field; field = field->next) {
printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "",
field->name, args[i++]);
printed += scnprintf(bf + printed, size - printed,
"%s%s: %ld", printed ? ", " : "",
field->name, args[i++]);
}
} else {
while (i < 6) {
printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]);
printed += scnprintf(bf + printed, size - printed,
"%sarg%d: %ld",
printed ? ", " : "", i, args[i]);
++i;
}
}
@ -139,17 +288,24 @@ static struct syscall *trace__syscall_info(struct trace *trace,
return &trace->syscalls.table[id];
out_cant_read:
printf("Problems reading syscall %d information\n", id);
printf("Problems reading syscall %d", id);
if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
printf("(%s)", trace->syscalls.table[id].name);
puts(" information");
return NULL;
}
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample)
{
char *msg;
void *args;
size_t printed = 0;
struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
struct thread_trace *ttrace = thread__trace(thread);
if (sc == NULL)
if (ttrace == NULL || sc == NULL)
return -1;
args = perf_evsel__rawptr(evsel, sample, "args");
@ -158,8 +314,27 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
return -1;
}
printf("%s(", sc->name);
syscall__fprintf_args(sc, args, stdout);
ttrace = thread->priv;
if (ttrace->entry_str == NULL) {
ttrace->entry_str = malloc(1024);
if (!ttrace->entry_str)
return -1;
}
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);
if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
if (!trace->duration_filter) {
trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
printf("%-70s\n", ttrace->entry_str);
}
} else
ttrace->entry_pending = true;
return 0;
}
@ -168,13 +343,37 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample)
{
int ret;
u64 duration = 0;
struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
struct thread_trace *ttrace = thread__trace(thread);
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
if (sc == NULL)
if (ttrace == NULL || sc == NULL)
return -1;
ret = perf_evsel__intval(evsel, sample, "ret");
ttrace = thread->priv;
ttrace->exit_time = sample->time;
if (ttrace->entry_time) {
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
goto out;
} else if (trace->duration_filter)
goto out;
trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);
if (ttrace->entry_pending) {
printf("%-70s", ttrace->entry_str);
} else {
printf(" ... [");
color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
printf("]: %s()", sc->name);
}
if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
char bf[256];
const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
@ -187,14 +386,44 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
printf(") = %d", ret);
putchar('\n');
out:
ttrace->entry_pending = false;
return 0;
}
static int trace__run(struct trace *trace)
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
struct perf_sample *sample)
{
u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
struct thread_trace *ttrace = thread__trace(thread);
if (ttrace == NULL)
goto out_dump;
ttrace->runtime_ms += runtime_ms;
trace->runtime_ms += runtime_ms;
return 0;
out_dump:
printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
evsel->name,
perf_evsel__strval(evsel, sample, "comm"),
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
runtime,
perf_evsel__intval(evsel, sample, "vruntime"));
return 0;
}
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
struct perf_evsel *evsel;
int err = -1, i, nr_events = 0, before;
int err = -1, i;
unsigned long before;
const bool forks = argc > 0;
if (evlist == NULL) {
printf("Not enough memory to run!\n");
@ -207,14 +436,38 @@ static int trace__run(struct trace *trace)
goto out_delete_evlist;
}
if (trace->sched &&
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
trace__sched_stat_runtime)) {
printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
goto out_delete_evlist;
}
err = perf_evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
printf("Problems parsing the target to trace, check your options!\n");
goto out_delete_evlist;
}
err = trace__symbols_init(trace, evlist);
if (err < 0) {
printf("Problems initializing symbol libraries!\n");
goto out_delete_evlist;
}
perf_evlist__config_attrs(evlist, &trace->opts);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
if (forks) {
err = perf_evlist__prepare_workload(evlist, &trace->opts, argv);
if (err < 0) {
printf("Couldn't run the workload!\n");
goto out_delete_evlist;
}
}
err = perf_evlist__open(evlist);
if (err < 0) {
printf("Couldn't create the events: %s\n", strerror(errno));
@ -228,8 +481,13 @@ static int trace__run(struct trace *trace)
}
perf_evlist__enable(evlist);
if (forks)
perf_evlist__start_workload(evlist);
trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
again:
before = nr_events;
before = trace->nr_events;
for (i = 0; i < evlist->nr_mmaps; i++) {
union perf_event *event;
@ -239,19 +497,7 @@ again:
tracepoint_handler handler;
struct perf_sample sample;
++nr_events;
switch (type) {
case PERF_RECORD_SAMPLE:
break;
case PERF_RECORD_LOST:
printf("LOST %" PRIu64 " events!\n", event->lost.lost);
continue;
default:
printf("Unexpected %s event, skipping...\n",
perf_event__name(type));
continue;
}
++trace->nr_events;
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
@ -259,14 +505,26 @@ again:
continue;
}
if (trace->base_time == 0)
trace->base_time = sample.time;
if (type != PERF_RECORD_SAMPLE) {
trace__process_event(&trace->host, event);
continue;
}
evsel = perf_evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
continue;
}
if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
printf("%d ", sample.tid);
if (sample.raw_data == NULL) {
printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
perf_evsel__name(evsel), sample.tid,
sample.cpu, sample.raw_size);
continue;
}
if (sample.raw_data == NULL) {
printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
@ -280,8 +538,15 @@ again:
}
}
if (nr_events == before)
if (trace->nr_events == before) {
if (done)
goto out_delete_evlist;
poll(evlist->pollfd, evlist->nr_fds, -1);
}
if (done)
perf_evlist__disable(evlist);
goto again;
@ -291,10 +556,65 @@ out:
return err;
}
static size_t trace__fprintf_threads_header(FILE *fp)
{
size_t printed;
printed = fprintf(fp, "\n _____________________________________________________________________\n");
printed += fprintf(fp," __) Summary of events (__\n\n");
printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
printed += fprintf(fp," _____________________________________________________________________\n\n");
return printed;
}
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
size_t printed = trace__fprintf_threads_header(fp);
struct rb_node *nd;
for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
struct thread *thread = rb_entry(nd, struct thread, rb_node);
struct thread_trace *ttrace = thread->priv;
const char *color;
double ratio;
if (ttrace == NULL)
continue;
ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 50.0)
color = PERF_COLOR_RED;
else if (ratio > 25.0)
color = PERF_COLOR_GREEN;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
printed += color_fprintf(fp, color, "%20s", thread->comm);
printed += fprintf(fp, " - %-5d :%11lu [", thread->pid, ttrace->nr_events);
printed += color_fprintf(fp, color, "%5.1f%%", ratio);
printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
}
return printed;
}
static int trace__set_duration(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct trace *trace = opt->value;
trace->duration_filter = atof(str);
return 0;
}
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const trace_usage[] = {
"perf trace [<options>]",
"perf trace [<options>] [<command>]",
"perf trace [<options>] -- <command> [<options>]",
NULL
};
struct trace trace = {
@ -328,21 +648,38 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
"number of mmap data pages"),
OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_END()
};
int err;
char bf[BUFSIZ];
argc = parse_options(argc, argv, trace_options, trace_usage, 0);
if (argc)
usage_with_options(trace_usage, trace_options);
err = perf_target__parse_uid(&trace.opts.target);
err = perf_target__validate(&trace.opts.target);
if (err) {
char bf[BUFSIZ];
perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
printf("%s", bf);
return err;
}
return trace__run(&trace);
err = perf_target__parse_uid(&trace.opts.target);
if (err) {
perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
printf("%s", bf);
return err;
}
if (!argc && perf_target__none(&trace.opts.target))
trace.opts.target.system_wide = true;
err = trace__run(&trace, argc, argv);
if (trace.sched && !err)
trace__fprintf_thread_summary(&trace, stdout);
return err;
}

View File

@ -43,6 +43,15 @@ int main(void)
}
endef
define SOURCE_BIONIC
#include <android/api-level.h>
int main(void)
{
return __ANDROID_API__;
}
endef
define SOURCE_ELF_MMAP
#include <libelf.h>
int main(void)
@ -112,7 +121,10 @@ define SOURCE_PYTHON_VERSION
#if PY_VERSION_HEX >= 0x03000000
#error
#endif
int main(void){}
int main(void)
{
return 0;
}
endef
define SOURCE_PYTHON_EMBED
#include <Python.h>
@ -203,4 +215,13 @@ int main(void)
return audit_open();
}
endef
endif
endif
define SOURCE_ON_EXIT
#include <stdio.h>
int main(void)
{
return on_exit(NULL, NULL);
}
endef

Some files were not shown because too many files have changed in this diff