tracing: Fix various typos in comments

Fix ~59 single-word typos in the tracing code comments, and fix
the grammar in a handful of places.

Link: https://lore.kernel.org/r/20210322224546.GA1981273@gmail.com
Link: https://lkml.kernel.org/r/20210323174935.GA4176821@gmail.com

Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Ingo Molnar 2021-03-23 18:49:35 +01:00 committed by Steven Rostedt (VMware)
parent b700fc3a63
commit f2cc020d78
32 changed files with 60 additions and 59 deletions


@@ -13,7 +13,7 @@ extern void ftrace_call_graph(void);
 #endif
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* reloction of mcount call site is the same as the address */
+/* relocation of mcount call site is the same as the address */
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 return addr;


@@ -236,7 +236,7 @@ void __naked return_to_handler(void)
 "bal ftrace_return_to_handler\n\t"
 "move $lp, $r0 \n\t"
-/* restore state nedded by the ABI */
+/* restore state needed by the ABI */
 "lmw.bim $r0,[$sp],$r1,#0x0 \n\t");
 }


@@ -12,7 +12,7 @@
 #ifdef __ASSEMBLY__
-/* Based off of objdump optput from glibc */
+/* Based off of objdump output from glibc */
 #define MCOUNT_SAVE_FRAME \
 stwu r1,-48(r1); \
@@ -52,7 +52,7 @@ extern void _mcount(void);
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
-/* reloction of mcount call site is the same as the address */
+/* relocation of mcount call site is the same as the address */
 return addr;
 }


@@ -67,7 +67,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * Modifying code must take extra care. On an SMP machine, if
  * the code being modified is also being executed on another CPU
  * that CPU will have undefined results and possibly take a GPF.
- * We use kstop_machine to stop other CPUS from exectuing code.
+ * We use kstop_machine to stop other CPUS from executing code.
  * But this does not stop NMIs from happening. We still need
  * to protect against that. We separate out the modification of
  * the code to take care of this.


@@ -17,7 +17,7 @@ void _mcount(void);
 #endif
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* reloction of mcount call site is the same as the address */
+/* relocation of mcount call site is the same as the address */
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 return addr;


@@ -477,7 +477,7 @@ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent)
  *
  * The instances directory is special as it allows for mkdir and rmdir to
  * to be done by userspace. When a mkdir or rmdir is performed, the inode
- * locks are released and the methhods passed in (@mkdir and @rmdir) are
+ * locks are released and the methods passed in (@mkdir and @rmdir) are
  * called without locks and with the name of the directory being created
  * within the instances directory.
  *


@@ -33,7 +33,7 @@
 /*
  * If the arch's mcount caller does not support all of ftrace's
  * features, then it must call an indirect function that
- * does. Or at least does enough to prevent any unwelcomed side effects.
+ * does. Or at least does enough to prevent any unwelcome side effects.
  */
 #if !ARCH_SUPPORTS_FTRACE_OPS
 # define FTRACE_FORCE_LIST_FUNC 1
@@ -389,7 +389,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
  */
 static inline void stack_tracer_disable(void)
 {
-/* Preemption or interupts must be disabled */
+/* Preemption or interrupts must be disabled */
 if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 this_cpu_inc(disable_stack_tracer);


@@ -206,7 +206,7 @@ static inline unsigned int tracing_gen_ctx_dec(void)
 trace_ctx = tracing_gen_ctx();
 /*
- * Subtract one from the preeption counter if preemption is enabled,
+ * Subtract one from the preemption counter if preemption is enabled,
  * see trace_event_buffer_reserve()for details.
  */
 if (IS_ENABLED(CONFIG_PREEMPTION))


@@ -465,7 +465,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * *
  * * The declared 'local variable' is called '__entry'
  * *
- * * __field(pid_t, prev_prid) is equivalent to a standard declariton:
+ * * __field(pid_t, prev_prid) is equivalent to a standard declaration:
  * *
  * * pid_t prev_pid;
  * *


@@ -49,7 +49,7 @@ TRACE_EVENT(io_uring_create,
 );
 /**
- * io_uring_register - called after a buffer/file/eventfd was succesfully
+ * io_uring_register - called after a buffer/file/eventfd was successfully
  * registered for a ring
  *
  * @ctx: pointer to a ring context structure


@@ -48,7 +48,7 @@ TRACE_EVENT(rcu_utilization,
  * RCU flavor, the grace-period number, and a string identifying the
  * grace-period-related event as follows:
  *
- * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL.
+ * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL.
  * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL.
  * "newreq": Request a new grace period.
  * "start": Start a grace period.


@@ -174,7 +174,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking,
 TP_ARGS(p));
 /*
- * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
  * It is not always called from the waking context.
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,


@@ -119,7 +119,7 @@ TRACE_EVENT(timer_expire_entry,
  * When used in combination with the timer_expire_entry tracepoint we can
  * determine the runtime of the timer callback function.
  *
- * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
  * be invalid. We solely track the pointer.
  */
 DEFINE_EVENT(timer_class, timer_expire_exit,


@@ -658,7 +658,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
 i++;
-/* skip optional "[0 +-][num]" width formating field */
+/* skip optional "[0 +-][num]" width formatting field */
 while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
 fmt[i] == ' ')
 i++;
@@ -748,7 +748,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 fmt_cnt++;
 }
-/* Maximumly we can have MAX_SEQ_PRINTF_VARARGS parameter, just give
+/*
+ * The maximum we can have is MAX_SEQ_PRINTF_VARARGS parameters, so just give
  * all of them to seq_printf().
  */
 seq_printf(m, fmt, params[0], params[1], params[2], params[3],


@@ -42,7 +42,7 @@ bool ftrace_graph_is_dead(void)
 }
 /**
- * ftrace_graph_stop - set to permanently disable function graph tracincg
+ * ftrace_graph_stop - set to permanently disable function graph tracing
  *
  * In case of an error int function graph tracing, this is called
  * to try to keep function graph tracing from causing any more harm.
@@ -117,7 +117,7 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 /*
  * Skip graph tracing if the return location is served by direct trampoline,
- * since call sequence and return addresses is unpredicatable anymore.
+ * since call sequence and return addresses are unpredictable anyway.
  * Ex: BPF trampoline may call original function and may skip frame
  * depending on type of BPF programs attached.
  */


@@ -1045,7 +1045,7 @@ struct ftrace_ops global_ops = {
 };
 /*
- * Used by the stack undwinder to know about dynamic ftrace trampolines.
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
  */
 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
@@ -3000,7 +3000,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
  * When the kernel is preemptive, tasks can be preempted
  * while on a ftrace trampoline. Just scheduling a task on
  * a CPU is not good enough to flush them. Calling
- * synchornize_rcu_tasks() will wait for those tasks to
+ * synchronize_rcu_tasks() will wait for those tasks to
  * execute and either schedule voluntarily or enter user space.
  */
 if (IS_ENABLED(CONFIG_PREEMPTION))
@@ -5373,7 +5373,7 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct);
  * @reset - non zero to reset all filters before applying this filter.
  *
  * Filters denote which functions should be enabled when tracing is enabled
- * If @ip is NULL, it failes to update filter.
+ * If @ip is NULL, it fails to update filter.
  */
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 int remove, int reset)
@@ -6325,7 +6325,7 @@ clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
 }
 }
-/* Clear any records from hashs */
+/* Clear any records from hashes */
 static void clear_mod_from_hashes(struct ftrace_page *pg)
 {
 struct trace_array *tr;


@@ -3154,7 +3154,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * is called before preempt_count() is updated, since the check will
  * be on the NORMAL bit, the TRANSITION bit will then be set. If an
  * NMI then comes in, it will set the NMI bit, but when the NMI code
- * does the trace_recursive_unlock() it will clear the TRANSTION bit
+ * does the trace_recursive_unlock() it will clear the TRANSITION bit
  * and leave the NMI bit set. But this is fine, because the interrupt
  * code that set the TRANSITION bit will then clear the NMI bit when it
  * calls trace_recursive_unlock(). If another NMI comes in, it will


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Test module for in-kernel sythetic event creation and generation.
+ * Test module for in-kernel synthetic event creation and generation.
  *
  * Copyright (C) 2019 Tom Zanussi <zanussi@kernel.org>
  */


@@ -514,7 +514,7 @@ void trace_free_pid_list(struct trace_pid_list *pid_list)
  * @filtered_pids: The list of pids to check
  * @search_pid: The PID to find in @filtered_pids
  *
- * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
+ * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
  */
 bool
 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
@@ -545,7 +545,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids,
 struct task_struct *task)
 {
 /*
- * If filterd_no_pids is not empty, and the task's pid is listed
+ * If filtered_no_pids is not empty, and the task's pid is listed
  * in filtered_no_pids, then return true.
  * Otherwise, if filtered_pids is empty, that means we can
  * trace all tasks. If it has content, then only trace pids
@@ -612,7 +612,7 @@ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
 (*pos)++;
-/* pid already is +1 of the actual prevous bit */
+/* pid already is +1 of the actual previous bit */
 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
 /* Return pid + 1 to allow zero to be represented */
@@ -834,7 +834,7 @@ DEFINE_MUTEX(trace_types_lock);
  * The content of events may become garbage if we allow other process consumes
  * these events concurrently:
  * A) the page of the consumed events may become a normal page
- * (not reader page) in ring buffer, and this page will be rewrited
+ * (not reader page) in ring buffer, and this page will be rewritten
  * by events producer.
  * B) The page of the consumed events may become a page for splice_read,
  * and this page will be returned to system.
@@ -1520,7 +1520,7 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
 #undef C
 #define C(a, b) b
-/* These must match the bit postions in trace_iterator_flags */
+/* These must match the bit positions in trace_iterator_flags */
 static const char *trace_options[] = {
 TRACE_FLAGS
 NULL
@@ -3368,7 +3368,7 @@ int trace_array_vprintk(struct trace_array *tr,
  * buffer (use trace_printk() for that), as writing into the top level
  * buffer should only have events that can be individually disabled.
  * trace_printk() is only used for debugging a kernel, and should not
- * be ever encorporated in normal use.
+ * be ever incorporated in normal use.
  *
  * trace_array_printk() can be used, as it will not add noise to the
  * top level tracing buffer.
@@ -7533,11 +7533,11 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
  * @cmd: The tracing command that caused the error
  * @str: The string to position the caret at within @cmd
  *
- * Finds the position of the first occurence of @str within @cmd. The
+ * Finds the position of the first occurrence of @str within @cmd. The
  * return value can be passed to tracing_log_err() for caret placement
  * within @cmd.
  *
- * Returns the index within @cmd of the first occurence of @str or 0
+ * Returns the index within @cmd of the first occurrence of @str or 0
  * if @str was not found.
  */
 unsigned int err_pos(char *cmd, const char *str)
@@ -9320,7 +9320,7 @@ int tracing_init_dentry(void)
  * As there may still be users that expect the tracing
  * files to exist in debugfs/tracing, we must automount
  * the tracefs file system there, so older tools still
- * work with the newer kerenl.
+ * work with the newer kernel.
  */
 tr->dir = debugfs_create_automount("tracing", NULL,
 trace_automount, NULL);


@@ -1347,7 +1347,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
 /**
  * event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
+ * @file: The file pointer associated with the event
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself
@@ -1374,7 +1374,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
 /**
  * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
+ * @file: The file pointer associated with the event
  * @buffer: The ring buffer that the event is being written to
  * @event: The event meta data in the ring buffer
  * @entry: The event itself


@@ -16,7 +16,7 @@ static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
- * suprises
+ * surprises
  */
 typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 perf_trace_t;


@@ -2646,7 +2646,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
 }
 /*
- * Since calls are grouped by systems, the likelyhood that the
+ * Since calls are grouped by systems, the likelihood that the
  * next call in the iteration belongs to the same system as the
  * previous call is high. As an optimization, we skip searching
  * for a map[] that matches the call's system if the last call
@@ -2706,7 +2706,7 @@ __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
 }
 /*
- * Just create a decriptor for early init. A descriptor is required
+ * Just create a descriptor for early init. A descriptor is required
  * for enabling events at boot. We want to enable events before
  * the filesystem is initialized.
  */


@@ -296,7 +296,7 @@ enum {
  * and "FALSE" the program entry after that, we are now done with the first
  * pass.
  *
- * Making the above "a || b && c" have a progam of:
+ * Making the above "a || b && c" have a program of:
  * prog[0] = { "a", 1, 2 }
  * prog[1] = { "b", 0, 2 }
  * prog[2] = { "c", 0, 3 }
@@ -390,7 +390,7 @@ enum {
  * F: return FALSE
  *
  * As "r = a; if (!r) goto n5;" is obviously the same as
- * "if (!a) goto n5;" without doing anything we can interperate the
+ * "if (!a) goto n5;" without doing anything we can interpret the
  * program as:
  * n1: if (!a) goto n5;
  * n2: if (!b) goto n5;


@@ -1385,7 +1385,7 @@ static int destroy_synth_event(struct synth_event *se)
 /**
  * synth_event_delete - Delete a synthetic event
- * @event_name: The name of the new sythetic event
+ * @event_name: The name of the new synthetic event
  *
  * Delete a synthetic event that was created with synth_event_create().
  *


@@ -764,7 +764,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
  * - we are inside irq code
  * - we just entered irq code
  *
- * retunns 0 if
+ * returns 0 if
  * - funcgraph-interrupts option is set
  * - we are not inside irq code
  */


@@ -83,7 +83,7 @@ struct hwlat_sample {
 u64 nmi_total_ts; /* Total time spent in NMIs */
 struct timespec64 timestamp; /* wall time */
 int nmi_count; /* # NMIs during this sample */
-int count; /* # of iteratons over threash */
+int count; /* # of iterations over thresh */
 };
 /* keep the global state somewhere. */
@@ -389,7 +389,7 @@ static int start_kthread(struct trace_array *tr)
 }
 /**
- * stop_kthread - Inform the hardware latency samping/detector kthread to stop
+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
  *
  * This kicks the running hardware latency sampling/detector kernel thread and
  * tells it to stop sampling now. Use this on unload and at system shutdown.


@@ -1748,7 +1748,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
 kretprobe_perf_func(tk, ri, regs);
 #endif
-return 0; /* We don't tweek kernel, so just return 0 */
+return 0; /* We don't tweak kernel, so just return 0 */
 }
 NOKPROBE_SYMBOL(kretprobe_dispatcher);


@@ -168,7 +168,7 @@ void __trace_probe_log_err(int offset, int err_type)
 if (!trace_probe_log.argv)
 return;
-/* Recalcurate the length and allocate buffer */
+/* Recalculate the length and allocate buffer */
 for (i = 0; i < trace_probe_log.argc; i++) {
 if (i == trace_probe_log.index)
 pos = len;
@@ -182,7 +182,7 @@ void __trace_probe_log_err(int offset, int err_type)
 /**
  * Set the error position is next to the last arg + space.
  * Note that len includes the terminal null and the cursor
- * appaers at pos + 1.
+ * appears at pos + 1.
  */
 pos = len;
 offset = 0;
@@ -592,7 +592,7 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
 }
 /*
- * Since $comm and immediate string can not be dereferred,
+ * Since $comm and immediate string can not be dereferenced,
  * we can find those by strcmp.
  */
 if (strcmp(arg, "$comm") == 0 || strncmp(arg, "\\\"", 2) == 0) {


@@ -134,7 +134,7 @@ struct fetch_type {
 size_t size; /* Byte size of type */
 int is_signed; /* Signed flag */
 print_type_func_t print; /* Print functions */
-const char *fmt; /* Fromat string */
+const char *fmt; /* Format string */
 const char *fmttype; /* Name in format file */
 };


@@ -167,7 +167,7 @@ array:
 return code->op == FETCH_OP_END ? ret : -EILSEQ;
 }
-/* Sum up total data length for dynamic arraies (strings) */
+/* Sum up total data length for dynamic arrays (strings) */
 static nokprobe_inline int
 __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
 {


@@ -878,7 +878,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 int ret;
 /*
- * Now that the big kernel lock is no longer preemptable,
+ * Now that the big kernel lock is no longer preemptible,
  * and this is called with the BKL held, it will always
  * fail. If preemption is already disabled, simply
  * pass the test. When the BKL is removed, or becomes
@@ -940,7 +940,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 int ret;
 /*
- * Now that the big kernel lock is no longer preemptable,
+ * Now that the big kernel lock is no longer preemptible,
  * and this is called with the BKL held, it will always
  * fail. If preemption is already disabled, simply
  * pass the test. When the BKL is removed, or becomes


@@ -16,7 +16,7 @@
  * The buffer size is currently PAGE_SIZE, although it may become dynamic
  * in the future.
  *
- * A write to the buffer will either succed or fail. That is, unlike
+ * A write to the buffer will either succeed or fail. That is, unlike
  * sprintf() there will not be a partial write (well it may write into
  * the buffer but it wont update the pointers). This allows users to
  * try to write something into the trace_seq buffer and if it fails
@@ -73,7 +73,7 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
  * @fmt: printf format string
  *
  * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
+ * copy to user routines. To simplify formatting of a trace
  * trace_seq_printf() is used to store strings into a special
  * buffer (@s). Then the output may be either used by
  * the sequencer or pulled into another buffer.
@@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask);
  * @fmt: printf format string
  *
  * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
+ * copy to user routines. To simplify formatting of a trace
  * trace_seq_printf is used to store strings into a special
  * buffer (@s). Then the output may be either used by
  * the sequencer or pulled into another buffer.
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(trace_seq_puts);
  * @c: simple character to record
  *
  * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple charater
+ * copy to user routines. This function records a simple character
  * into a special buffer (@s) for later retrieval by a sequencer
  * or other mechanism.
  */
@@ -348,7 +348,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
 EXPORT_SYMBOL_GPL(trace_seq_path);
 /**
- * trace_seq_to_user - copy the squence buffer to user space
+ * trace_seq_to_user - copy the sequence buffer to user space
  * @s: trace sequence descriptor
  * @ubuf: The userspace memory location to copy to
  * @cnt: The amount to copy
@@ -363,7 +363,7 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
  *
  * On failure it returns -EBUSY if all of the content in the
  * sequence has been already read, which includes nothing in the
- * sequenc (@s->len == @s->readpos).
+ * sequence (@s->len == @s->readpos).
  *
  * Returns -EFAULT if the copy to userspace fails.
  */