xref: /linux/kernel/trace/trace.c (revision 164666fa66669d437bdcc8d5f1744a2aee73be41)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7  *
8  * Originally taken from the RT patch by:
9  *    Arnaldo Carvalho de Melo <acme@redhat.com>
10  *
11  * Based on code from the latency_tracer, that is:
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/panic_notifier.h>
43 #include <linux/poll.h>
44 #include <linux/nmi.h>
45 #include <linux/fs.h>
46 #include <linux/trace.h>
47 #include <linux/sched/clock.h>
48 #include <linux/sched/rt.h>
49 #include <linux/fsnotify.h>
50 #include <linux/irq_work.h>
51 #include <linux/workqueue.h>
52 
53 #include "trace.h"
54 #include "trace_output.h"
55 
56 /*
57  * On boot up, the ring buffer is set to the minimum size, so that
58  * we do not waste memory on systems that are not using tracing.
59  */
60 bool ring_buffer_expanded;
61 
62 /*
63  * We need to change this state when a selftest is running.
64  * A selftest will look into the ring-buffer to count the
65  * entries inserted during the selftest, although some concurrent
66  * insertions into the ring-buffer, such as trace_printk(), could occur
67  * at the same time, giving false positive or negative results.
68  */
69 static bool __read_mostly tracing_selftest_running;
70 
71 /*
72  * If boot-time tracing including tracers/events via kernel cmdline
73  * is running, we do not want to run SELFTEST.
74  */
75 bool __read_mostly tracing_selftest_disabled;
76 
77 #ifdef CONFIG_FTRACE_STARTUP_TEST
78 void __init disable_tracing_selftest(const char *reason)
79 {
80 	if (!tracing_selftest_disabled) {
81 		tracing_selftest_disabled = true;
82 		pr_info("Ftrace startup test is disabled due to %s\n", reason);
83 	}
84 }
85 #endif
86 
87 /* Pipe tracepoints to printk */
88 struct trace_iterator *tracepoint_print_iter;
89 int tracepoint_printk;
90 static bool tracepoint_printk_stop_on_boot __initdata;
91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 
93 /* For tracers that don't implement custom flags */
94 static struct tracer_opt dummy_tracer_opt[] = {
95 	{ }
96 };
97 
98 static int
99 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
100 {
101 	return 0;
102 }
103 
104 /*
105  * To prevent the comm cache from being overwritten when no
106  * tracing is active, only save the comm when a trace event
107  * occurred.
108  */
109 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
110 
111 /*
112  * Kill all tracing for good (never come back).
113  * It is initialized to 1 but will turn to zero if the initialization
114  * of the tracer is successful. But that is the only place that sets
115  * this back to zero.
116  */
117 static int tracing_disabled = 1;
118 
119 cpumask_var_t __read_mostly	tracing_buffer_mask;
120 
121 /*
122  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
123  *
124  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
125  * is set, then ftrace_dump is called. This will output the contents
126  * of the ftrace buffers to the console.  This is very useful for
127  * capturing traces that lead to crashes and outputting them to a
128  * serial console.
129  *
130  * It is off by default, but you can enable it either by specifying
131  * "ftrace_dump_on_oops" on the kernel command line, or by setting
132  * /proc/sys/kernel/ftrace_dump_on_oops.
133  * Set it to 1 if you want to dump the buffers of all CPUs,
134  * or to 2 if you want to dump only the buffer of the CPU that triggered the oops.
135  */
136 
137 enum ftrace_dump_mode ftrace_dump_on_oops;
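
/*
 * For example (illustrative only, see set_ftrace_dump_on_oops() below):
 * booting with "ftrace_dump_on_oops" or "ftrace_dump_on_oops=1" selects
 * DUMP_ALL, while "ftrace_dump_on_oops=orig_cpu" (or "=2") selects
 * DUMP_ORIG. At run time the same can be done with:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */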
138 
139 /* When set, tracing will stop when a WARN*() is hit */
140 int __disable_trace_on_warning;
141 
142 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
143 /* Map of enums to their values, for "eval_map" file */
144 struct trace_eval_map_head {
145 	struct module			*mod;
146 	unsigned long			length;
147 };
148 
149 union trace_eval_map_item;
150 
151 struct trace_eval_map_tail {
152 	/*
153 	 * "end" is first and points to NULL as it must be different
154 	 * from "mod" or "eval_string"
155 	 */
156 	union trace_eval_map_item	*next;
157 	const char			*end;	/* points to NULL */
158 };
159 
160 static DEFINE_MUTEX(trace_eval_mutex);
161 
162 /*
163  * The trace_eval_maps are saved in an array with two extra elements,
164  * one at the beginning, and one at the end. The beginning item contains
165  * the count of the saved maps (head.length), and the module they
166  * belong to if not built in (head.mod). The ending item contains a
167  * pointer to the next array of saved eval_map items.
168  */
169 union trace_eval_map_item {
170 	struct trace_eval_map		map;
171 	struct trace_eval_map_head	head;
172 	struct trace_eval_map_tail	tail;
173 };
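
/*
 * Illustration (not literal data): an array saved for a module "foo" with
 * three eval maps is laid out as
 *
 *	[ head: mod=foo, length=3 ][ map 0 ][ map 1 ][ map 2 ][ tail: next ]
 *
 * where tail.next chains to the next saved array of eval_map items (if any).
 */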
174 
175 static union trace_eval_map_item *trace_eval_maps;
176 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
177 
178 int tracing_set_tracer(struct trace_array *tr, const char *buf);
179 static void ftrace_trace_userstack(struct trace_array *tr,
180 				   struct trace_buffer *buffer,
181 				   unsigned int trace_ctx);
182 
183 #define MAX_TRACER_SIZE		100
184 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
185 static char *default_bootup_tracer;
186 
187 static bool allocate_snapshot;
188 
189 static int __init set_cmdline_ftrace(char *str)
190 {
191 	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
192 	default_bootup_tracer = bootup_tracer_buf;
193 	/* We are using ftrace early, expand it */
194 	ring_buffer_expanded = true;
195 	return 1;
196 }
197 __setup("ftrace=", set_cmdline_ftrace);
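
/*
 * For instance (illustrative), booting with "ftrace=function_graph" selects
 * function_graph as the boot-up tracer; the ring buffer is expanded right
 * away because tracing starts before the normal run-time setup is available.
 */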
198 
199 static int __init set_ftrace_dump_on_oops(char *str)
200 {
201 	if (*str++ != '=' || !*str || !strcmp("1", str)) {
202 		ftrace_dump_on_oops = DUMP_ALL;
203 		return 1;
204 	}
205 
206 	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
207 		ftrace_dump_on_oops = DUMP_ORIG;
208 		return 1;
209 	}
210 
211 	return 0;
212 }
213 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
214 
215 static int __init stop_trace_on_warning(char *str)
216 {
217 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
218 		__disable_trace_on_warning = 1;
219 	return 1;
220 }
221 __setup("traceoff_on_warning", stop_trace_on_warning);
222 
223 static int __init boot_alloc_snapshot(char *str)
224 {
225 	allocate_snapshot = true;
226 	/* We also need the main ring buffer expanded */
227 	ring_buffer_expanded = true;
228 	return 1;
229 }
230 __setup("alloc_snapshot", boot_alloc_snapshot);
231 
232 
233 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
234 
235 static int __init set_trace_boot_options(char *str)
236 {
237 	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
238 	return 0;
239 }
240 __setup("trace_options=", set_trace_boot_options);
241 
242 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
243 static char *trace_boot_clock __initdata;
244 
245 static int __init set_trace_boot_clock(char *str)
246 {
247 	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
248 	trace_boot_clock = trace_boot_clock_buf;
249 	return 0;
250 }
251 __setup("trace_clock=", set_trace_boot_clock);
252 
253 static int __init set_tracepoint_printk(char *str)
254 {
255 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
256 		tracepoint_printk = 1;
257 	return 1;
258 }
259 __setup("tp_printk", set_tracepoint_printk);
260 
261 static int __init set_tracepoint_printk_stop(char *str)
262 {
263 	tracepoint_printk_stop_on_boot = true;
264 	return 1;
265 }
266 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
267 
268 unsigned long long ns2usecs(u64 nsec)
269 {
270 	nsec += 500;
271 	do_div(nsec, 1000);
272 	return nsec;
273 }
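
/*
 * For example, ns2usecs(1500) returns 2 and ns2usecs(1499) returns 1:
 * the +500 makes the integer division round to the nearest microsecond.
 */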
274 
275 static void
276 trace_process_export(struct trace_export *export,
277 	       struct ring_buffer_event *event, int flag)
278 {
279 	struct trace_entry *entry;
280 	unsigned int size = 0;
281 
282 	if (export->flags & flag) {
283 		entry = ring_buffer_event_data(event);
284 		size = ring_buffer_event_length(event);
285 		export->write(export, entry, size);
286 	}
287 }
288 
289 static DEFINE_MUTEX(ftrace_export_lock);
290 
291 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
292 
293 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
294 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
295 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
296 
297 static inline void ftrace_exports_enable(struct trace_export *export)
298 {
299 	if (export->flags & TRACE_EXPORT_FUNCTION)
300 		static_branch_inc(&trace_function_exports_enabled);
301 
302 	if (export->flags & TRACE_EXPORT_EVENT)
303 		static_branch_inc(&trace_event_exports_enabled);
304 
305 	if (export->flags & TRACE_EXPORT_MARKER)
306 		static_branch_inc(&trace_marker_exports_enabled);
307 }
308 
309 static inline void ftrace_exports_disable(struct trace_export *export)
310 {
311 	if (export->flags & TRACE_EXPORT_FUNCTION)
312 		static_branch_dec(&trace_function_exports_enabled);
313 
314 	if (export->flags & TRACE_EXPORT_EVENT)
315 		static_branch_dec(&trace_event_exports_enabled);
316 
317 	if (export->flags & TRACE_EXPORT_MARKER)
318 		static_branch_dec(&trace_marker_exports_enabled);
319 }
320 
321 static void ftrace_exports(struct ring_buffer_event *event, int flag)
322 {
323 	struct trace_export *export;
324 
325 	preempt_disable_notrace();
326 
327 	export = rcu_dereference_raw_check(ftrace_exports_list);
328 	while (export) {
329 		trace_process_export(export, event, flag);
330 		export = rcu_dereference_raw_check(export->next);
331 	}
332 
333 	preempt_enable_notrace();
334 }
335 
336 static inline void
337 add_trace_export(struct trace_export **list, struct trace_export *export)
338 {
339 	rcu_assign_pointer(export->next, *list);
340 	/*
341 	 * We are inserting the export into the list, but another
342 	 * CPU might be walking that list. We need to make sure
343 	 * the export->next pointer is valid before another CPU sees
344 	 * the export pointer included in the list.
345 	 */
346 	rcu_assign_pointer(*list, export);
347 }
348 
349 static inline int
350 rm_trace_export(struct trace_export **list, struct trace_export *export)
351 {
352 	struct trace_export **p;
353 
354 	for (p = list; *p != NULL; p = &(*p)->next)
355 		if (*p == export)
356 			break;
357 
358 	if (*p != export)
359 		return -1;
360 
361 	rcu_assign_pointer(*p, (*p)->next);
362 
363 	return 0;
364 }
365 
366 static inline void
367 add_ftrace_export(struct trace_export **list, struct trace_export *export)
368 {
369 	ftrace_exports_enable(export);
370 
371 	add_trace_export(list, export);
372 }
373 
374 static inline int
375 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
376 {
377 	int ret;
378 
379 	ret = rm_trace_export(list, export);
380 	ftrace_exports_disable(export);
381 
382 	return ret;
383 }
384 
385 int register_ftrace_export(struct trace_export *export)
386 {
387 	if (WARN_ON_ONCE(!export->write))
388 		return -1;
389 
390 	mutex_lock(&ftrace_export_lock);
391 
392 	add_ftrace_export(&ftrace_exports_list, export);
393 
394 	mutex_unlock(&ftrace_export_lock);
395 
396 	return 0;
397 }
398 EXPORT_SYMBOL_GPL(register_ftrace_export);
399 
400 int unregister_ftrace_export(struct trace_export *export)
401 {
402 	int ret;
403 
404 	mutex_lock(&ftrace_export_lock);
405 
406 	ret = rm_ftrace_export(&ftrace_exports_list, export);
407 
408 	mutex_unlock(&ftrace_export_lock);
409 
410 	return ret;
411 }
412 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
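
/*
 * A minimal sketch of a trace_export user (names are hypothetical, shown
 * only to illustrate the register/unregister pairing and the write()
 * callback that receives the raw event data and its length):
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		... forward the raw entry to an out-of-band sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */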
413 
414 /* trace_flags holds trace_options default values */
415 #define TRACE_DEFAULT_FLAGS						\
416 	(FUNCTION_DEFAULT_FLAGS |					\
417 	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
418 	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
419 	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
420 	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
421 	 TRACE_ITER_HASH_PTR)
422 
423 /* trace_options that are only supported by global_trace */
424 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
425 	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
426 
427 /* trace_flags that are default zero for instances */
428 #define ZEROED_TRACE_FLAGS \
429 	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
430 
431 /*
432  * The global_trace is the descriptor that holds the top-level tracing
433  * buffers for the live tracing.
434  */
435 static struct trace_array global_trace = {
436 	.trace_flags = TRACE_DEFAULT_FLAGS,
437 };
438 
439 LIST_HEAD(ftrace_trace_arrays);
440 
441 int trace_array_get(struct trace_array *this_tr)
442 {
443 	struct trace_array *tr;
444 	int ret = -ENODEV;
445 
446 	mutex_lock(&trace_types_lock);
447 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
448 		if (tr == this_tr) {
449 			tr->ref++;
450 			ret = 0;
451 			break;
452 		}
453 	}
454 	mutex_unlock(&trace_types_lock);
455 
456 	return ret;
457 }
458 
459 static void __trace_array_put(struct trace_array *this_tr)
460 {
461 	WARN_ON(!this_tr->ref);
462 	this_tr->ref--;
463 }
464 
465 /**
466  * trace_array_put - Decrement the reference counter for this trace array.
467  * @this_tr : pointer to the trace array
468  *
469  * NOTE: Use this when we no longer need the trace array returned by
470  * trace_array_get_by_name(). This ensures the trace array can be later
471  * destroyed.
472  *
473  */
474 void trace_array_put(struct trace_array *this_tr)
475 {
476 	if (!this_tr)
477 		return;
478 
479 	mutex_lock(&trace_types_lock);
480 	__trace_array_put(this_tr);
481 	mutex_unlock(&trace_types_lock);
482 }
483 EXPORT_SYMBOL_GPL(trace_array_put);
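
/*
 * Typical pairing (sketch, the instance name is hypothetical): a caller
 * that looks up a trace instance must drop the reference when done, e.g.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *		trace_array_put(tr);
 *	}
 */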
484 
485 int tracing_check_open_get_tr(struct trace_array *tr)
486 {
487 	int ret;
488 
489 	ret = security_locked_down(LOCKDOWN_TRACEFS);
490 	if (ret)
491 		return ret;
492 
493 	if (tracing_disabled)
494 		return -ENODEV;
495 
496 	if (tr && trace_array_get(tr) < 0)
497 		return -ENODEV;
498 
499 	return 0;
500 }
501 
502 int call_filter_check_discard(struct trace_event_call *call, void *rec,
503 			      struct trace_buffer *buffer,
504 			      struct ring_buffer_event *event)
505 {
506 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
507 	    !filter_match_preds(call->filter, rec)) {
508 		__trace_event_discard_commit(buffer, event);
509 		return 1;
510 	}
511 
512 	return 0;
513 }
514 
515 /**
516  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
517  * @filtered_pids: The list of pids to check
518  * @search_pid: The PID to find in @filtered_pids
519  *
520  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
521  */
522 bool
523 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
524 {
525 	return trace_pid_list_is_set(filtered_pids, search_pid);
526 }
527 
528 /**
529  * trace_ignore_this_task - should a task be ignored for tracing
530  * @filtered_pids: The list of pids to check
531  * @filtered_no_pids: The list of pids not to be traced
532  * @task: The task that should be ignored if not filtered
533  *
534  * Checks if @task should be traced or not from @filtered_pids.
535  * Returns true if @task should *NOT* be traced.
536  * Returns false if @task should be traced.
537  */
538 bool
539 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
540 		       struct trace_pid_list *filtered_no_pids,
541 		       struct task_struct *task)
542 {
543 	/*
544 	 * If filtered_no_pids is not empty, and the task's pid is listed
545 	 * in filtered_no_pids, then return true.
546 	 * Otherwise, if filtered_pids is empty, that means we can
547 	 * trace all tasks. If it has content, then only trace pids
548 	 * within filtered_pids.
549 	 */
550 
551 	return (filtered_pids &&
552 		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
553 		(filtered_no_pids &&
554 		 trace_find_filtered_pid(filtered_no_pids, task->pid));
555 }
556 
557 /**
558  * trace_filter_add_remove_task - Add or remove a task from a pid_list
559  * @pid_list: The list to modify
560  * @self: The current task for fork or NULL for exit
561  * @task: The task to add or remove
562  *
563  * If adding a task, if @self is defined, the task is only added if @self
564  * is also included in @pid_list. This happens on fork and tasks should
565  * only be added when the parent is listed. If @self is NULL, then the
566  * @task pid will be removed from the list, which would happen on exit
567  * of a task.
568  */
569 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
570 				  struct task_struct *self,
571 				  struct task_struct *task)
572 {
573 	if (!pid_list)
574 		return;
575 
576 	/* For forks, we only add if the forking task is listed */
577 	if (self) {
578 		if (!trace_find_filtered_pid(pid_list, self->pid))
579 			return;
580 	}
581 
582 	/* "self" is set for forks, and NULL for exits */
583 	if (self)
584 		trace_pid_list_set(pid_list, task->pid);
585 	else
586 		trace_pid_list_clear(pid_list, task->pid);
587 }
588 
589 /**
590  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
591  * @pid_list: The pid list to show
592  * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
593  * @pos: The position of the file
594  *
595  * This is used by the seq_file "next" operation to iterate the pids
596  * listed in a trace_pid_list structure.
597  *
598  * Returns the pid+1 as we want to display pid of zero, but NULL would
599  * stop the iteration.
600  */
601 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
602 {
603 	long pid = (unsigned long)v;
604 	unsigned int next;
605 
606 	(*pos)++;
607 
608 	/* pid already is +1 of the actual previous bit */
609 	if (trace_pid_list_next(pid_list, pid, &next) < 0)
610 		return NULL;
611 
612 	pid = next;
613 
614 	/* Return pid + 1 to allow zero to be represented */
615 	return (void *)(pid + 1);
616 }
617 
618 /**
619  * trace_pid_start - Used for seq_file to start reading pid lists
620  * @pid_list: The pid list to show
621  * @pos: The position of the file
622  *
623  * This is used by seq_file "start" operation to start the iteration
624  * of listing pids.
625  *
626  * Returns the pid+1 as we want to display pid of zero, but NULL would
627  * stop the iteration.
628  */
629 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
630 {
631 	unsigned long pid;
632 	unsigned int first;
633 	loff_t l = 0;
634 
635 	if (trace_pid_list_first(pid_list, &first) < 0)
636 		return NULL;
637 
638 	pid = first;
639 
640 	/* Return pid + 1 so that zero can be the exit value */
641 	for (pid++; pid && l < *pos;
642 	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
643 		;
644 	return (void *)pid;
645 }
646 
647 /**
648  * trace_pid_show - show the current pid in seq_file processing
649  * @m: The seq_file structure to write into
650  * @v: A void pointer of the pid (+1) value to display
651  *
652  * Can be directly used by seq_file operations to display the current
653  * pid value.
654  */
655 int trace_pid_show(struct seq_file *m, void *v)
656 {
657 	unsigned long pid = (unsigned long)v - 1;
658 
659 	seq_printf(m, "%lu\n", pid);
660 	return 0;
661 }
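
/*
 * trace_pid_start(), trace_pid_next() and trace_pid_show() are meant to be
 * plugged into a seq_file, roughly like this (sketch only; the start/next
 * wrappers that look up the pid_list are caller specific):
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,	calls trace_pid_start(pid_list, pos)
 *		.next	= my_pid_next,	calls trace_pid_next(pid_list, v, pos)
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */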
662 
663 /* 128 should be much more than enough */
664 #define PID_BUF_SIZE		127
665 
666 int trace_pid_write(struct trace_pid_list *filtered_pids,
667 		    struct trace_pid_list **new_pid_list,
668 		    const char __user *ubuf, size_t cnt)
669 {
670 	struct trace_pid_list *pid_list;
671 	struct trace_parser parser;
672 	unsigned long val;
673 	int nr_pids = 0;
674 	ssize_t read = 0;
675 	ssize_t ret;
676 	loff_t pos;
677 	pid_t pid;
678 
679 	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
680 		return -ENOMEM;
681 
682 	/*
683 	 * Always recreate a new array. The write is an all or nothing
684 	 * operation. Always create a new array when adding new pids by
685 	 * the user. If the operation fails, then the current list is
686 	 * not modified.
687 	 */
688 	pid_list = trace_pid_list_alloc();
689 	if (!pid_list) {
690 		trace_parser_put(&parser);
691 		return -ENOMEM;
692 	}
693 
694 	if (filtered_pids) {
695 		/* copy the current bits to the new max */
696 		ret = trace_pid_list_first(filtered_pids, &pid);
697 		while (!ret) {
698 			trace_pid_list_set(pid_list, pid);
699 			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
700 			nr_pids++;
701 		}
702 	}
703 
704 	ret = 0;
705 	while (cnt > 0) {
706 
707 		pos = 0;
708 
709 		ret = trace_get_user(&parser, ubuf, cnt, &pos);
710 		if (ret < 0 || !trace_parser_loaded(&parser))
711 			break;
712 
713 		read += ret;
714 		ubuf += ret;
715 		cnt -= ret;
716 
717 		ret = -EINVAL;
718 		if (kstrtoul(parser.buffer, 0, &val))
719 			break;
720 
721 		pid = (pid_t)val;
722 
723 		if (trace_pid_list_set(pid_list, pid) < 0) {
724 			ret = -1;
725 			break;
726 		}
727 		nr_pids++;
728 
729 		trace_parser_clear(&parser);
730 		ret = 0;
731 	}
732 	trace_parser_put(&parser);
733 
734 	if (ret < 0) {
735 		trace_pid_list_free(pid_list);
736 		return ret;
737 	}
738 
739 	if (!nr_pids) {
740 		/* Cleared the list of pids */
741 		trace_pid_list_free(pid_list);
742 		read = ret;
743 		pid_list = NULL;
744 	}
745 
746 	*new_pid_list = pid_list;
747 
748 	return read;
749 }
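
/*
 * Sketch of how a file's write() handler typically uses this (field and
 * variable names are hypothetical): on success the new list is published
 * and the old one is freed only after an RCU grace period, so concurrent
 * readers stay safe.
 *
 *	filtered = rcu_dereference_protected(tr->my_filtered_pids, ...);
 *	ret = trace_pid_write(filtered, &new_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->my_filtered_pids, new_list);
 *	if (filtered) {
 *		synchronize_rcu();
 *		trace_pid_list_free(filtered);
 *	}
 */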
750 
751 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
752 {
753 	u64 ts;
754 
755 	/* Early boot up does not have a buffer yet */
756 	if (!buf->buffer)
757 		return trace_clock_local();
758 
759 	ts = ring_buffer_time_stamp(buf->buffer);
760 	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
761 
762 	return ts;
763 }
764 
765 u64 ftrace_now(int cpu)
766 {
767 	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
768 }
769 
770 /**
771  * tracing_is_enabled - Show if global_trace has been enabled
772  *
773  * Shows if the global trace has been enabled or not. It uses the
774  * mirror flag "buffer_disabled" so it can be used in fast paths such as for
775  * the irqsoff tracer. But it may be inaccurate due to races. If you
776  * need to know the accurate state, use tracing_is_on() which is a little
777  * slower, but accurate.
778  */
779 int tracing_is_enabled(void)
780 {
781 	/*
782 	 * For quick access (irqsoff uses this in fast path), just
783 	 * return the mirror variable of the state of the ring buffer.
784 	 * It's a little racy, but we don't really care.
785 	 */
786 	smp_rmb();
787 	return !global_trace.buffer_disabled;
788 }
789 
790 /*
791  * trace_buf_size is the size in bytes that is allocated
792  * for a buffer. Note, the number of bytes is always rounded
793  * to page size.
794  *
795  * This number is purposely set to a low value of 16384 entries.
796  * If a dump on oops happens, it will be much appreciated
797  * not to have to wait for all that output. Anyway, this can be
798  * configured at both boot time and run time.
799  */
800 #define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
801 
802 static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
803 
804 /* trace_types holds a link list of available tracers. */
805 static struct tracer		*trace_types __read_mostly;
806 
807 /*
808  * trace_types_lock is used to protect the trace_types list.
809  */
810 DEFINE_MUTEX(trace_types_lock);
811 
812 /*
813  * serialize the access of the ring buffer
814  *
815  * The ring buffer serializes readers, but that is only low-level protection.
816  * The validity of the events (which are returned by ring_buffer_peek() etc.)
817  * is not protected by the ring buffer.
818  *
819  * The content of events may become garbage if we allow other processes to
820  * consume these events concurrently:
821  *   A) the page of the consumed events may become a normal page
822  *      (not a reader page) in the ring buffer, and this page will be
823  *      rewritten by the events producer.
824  *   B) the page of the consumed events may become a page for splice_read,
825  *      and this page will be returned to the system.
826  *
827  * These primitives allow multiple processes to access different cpu ring
828  * buffers concurrently.
829  *
830  * These primitives don't distinguish read-only and read-consume access.
831  * Multiple read-only accesses are also serialized.
832  */
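
/*
 * Reader-side pattern (sketch): consumers bracket their ring buffer
 * accesses with these helpers, e.g.
 *
 *	trace_access_lock(cpu_file);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu_file);
 */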
833 
834 #ifdef CONFIG_SMP
835 static DECLARE_RWSEM(all_cpu_access_lock);
836 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
837 
838 static inline void trace_access_lock(int cpu)
839 {
840 	if (cpu == RING_BUFFER_ALL_CPUS) {
841 		/* gain it for accessing the whole ring buffer. */
842 		down_write(&all_cpu_access_lock);
843 	} else {
844 		/* gain it for accessing a cpu ring buffer. */
845 
846 		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
847 		down_read(&all_cpu_access_lock);
848 
849 		/* Secondly block other access to this @cpu ring buffer. */
850 		mutex_lock(&per_cpu(cpu_access_lock, cpu));
851 	}
852 }
853 
854 static inline void trace_access_unlock(int cpu)
855 {
856 	if (cpu == RING_BUFFER_ALL_CPUS) {
857 		up_write(&all_cpu_access_lock);
858 	} else {
859 		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
860 		up_read(&all_cpu_access_lock);
861 	}
862 }
863 
864 static inline void trace_access_lock_init(void)
865 {
866 	int cpu;
867 
868 	for_each_possible_cpu(cpu)
869 		mutex_init(&per_cpu(cpu_access_lock, cpu));
870 }
871 
872 #else
873 
874 static DEFINE_MUTEX(access_lock);
875 
876 static inline void trace_access_lock(int cpu)
877 {
878 	(void)cpu;
879 	mutex_lock(&access_lock);
880 }
881 
882 static inline void trace_access_unlock(int cpu)
883 {
884 	(void)cpu;
885 	mutex_unlock(&access_lock);
886 }
887 
888 static inline void trace_access_lock_init(void)
889 {
890 }
891 
892 #endif
893 
894 #ifdef CONFIG_STACKTRACE
895 static void __ftrace_trace_stack(struct trace_buffer *buffer,
896 				 unsigned int trace_ctx,
897 				 int skip, struct pt_regs *regs);
898 static inline void ftrace_trace_stack(struct trace_array *tr,
899 				      struct trace_buffer *buffer,
900 				      unsigned int trace_ctx,
901 				      int skip, struct pt_regs *regs);
902 
903 #else
904 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
905 					unsigned int trace_ctx,
906 					int skip, struct pt_regs *regs)
907 {
908 }
909 static inline void ftrace_trace_stack(struct trace_array *tr,
910 				      struct trace_buffer *buffer,
911 				      unsigned long trace_ctx,
912 				      int skip, struct pt_regs *regs)
913 {
914 }
915 
916 #endif
917 
918 static __always_inline void
919 trace_event_setup(struct ring_buffer_event *event,
920 		  int type, unsigned int trace_ctx)
921 {
922 	struct trace_entry *ent = ring_buffer_event_data(event);
923 
924 	tracing_generic_entry_update(ent, type, trace_ctx);
925 }
926 
927 static __always_inline struct ring_buffer_event *
928 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
929 			  int type,
930 			  unsigned long len,
931 			  unsigned int trace_ctx)
932 {
933 	struct ring_buffer_event *event;
934 
935 	event = ring_buffer_lock_reserve(buffer, len);
936 	if (event != NULL)
937 		trace_event_setup(event, type, trace_ctx);
938 
939 	return event;
940 }
941 
942 void tracer_tracing_on(struct trace_array *tr)
943 {
944 	if (tr->array_buffer.buffer)
945 		ring_buffer_record_on(tr->array_buffer.buffer);
946 	/*
947 	 * yet, or by some tracers (like irqsoff) that just want to
948 	 * know if the ring buffer has been disabled, but it can handle
949 	 * races where it gets disabled while we still do a record.
950 	 * races of where it gets disabled but we still do a record.
951 	 * As the check is in the fast path of the tracers, it is more
952 	 * important to be fast than accurate.
953 	 */
954 	tr->buffer_disabled = 0;
955 	/* Make the flag seen by readers */
956 	smp_wmb();
957 }
958 
959 /**
960  * tracing_on - enable tracing buffers
961  *
962  * This function enables tracing buffers that may have been
963  * disabled with tracing_off.
964  */
965 void tracing_on(void)
966 {
967 	tracer_tracing_on(&global_trace);
968 }
969 EXPORT_SYMBOL_GPL(tracing_on);
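
/*
 * A common debugging pattern (sketch) is to bracket a suspect code path so
 * that only the interesting window is recorded:
 *
 *	tracing_on();
 *	... code being investigated ...
 *	tracing_off();
 *
 * or to call tracing_off() only on the failure branch, so the buffer keeps
 * the events that led up to the problem.
 */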
970 
971 
972 static __always_inline void
973 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
974 {
975 	__this_cpu_write(trace_taskinfo_save, true);
976 
977 	/* If this is the temp buffer, we need to commit fully */
978 	if (this_cpu_read(trace_buffered_event) == event) {
979 		/* Length is in event->array[0] */
980 		ring_buffer_write(buffer, event->array[0], &event->array[1]);
981 		/* Release the temp buffer */
982 		this_cpu_dec(trace_buffered_event_cnt);
983 		/* ring_buffer_unlock_commit() enables preemption */
984 		preempt_enable_notrace();
985 	} else
986 		ring_buffer_unlock_commit(buffer, event);
987 }
988 
989 /**
990  * __trace_puts - write a constant string into the trace buffer.
991  * @ip:	   The address of the caller
992  * @str:   The constant string to write
993  * @size:  The size of the string.
994  */
995 int __trace_puts(unsigned long ip, const char *str, int size)
996 {
997 	struct ring_buffer_event *event;
998 	struct trace_buffer *buffer;
999 	struct print_entry *entry;
1000 	unsigned int trace_ctx;
1001 	int alloc;
1002 
1003 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1004 		return 0;
1005 
1006 	if (unlikely(tracing_selftest_running || tracing_disabled))
1007 		return 0;
1008 
1009 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
1010 
1011 	trace_ctx = tracing_gen_ctx();
1012 	buffer = global_trace.array_buffer.buffer;
1013 	ring_buffer_nest_start(buffer);
1014 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1015 					    trace_ctx);
1016 	if (!event) {
1017 		size = 0;
1018 		goto out;
1019 	}
1020 
1021 	entry = ring_buffer_event_data(event);
1022 	entry->ip = ip;
1023 
1024 	memcpy(&entry->buf, str, size);
1025 
1026 	/* Add a newline if necessary */
1027 	if (entry->buf[size - 1] != '\n') {
1028 		entry->buf[size] = '\n';
1029 		entry->buf[size + 1] = '\0';
1030 	} else
1031 		entry->buf[size] = '\0';
1032 
1033 	__buffer_unlock_commit(buffer, event);
1034 	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1035  out:
1036 	ring_buffer_nest_end(buffer);
1037 	return size;
1038 }
1039 EXPORT_SYMBOL_GPL(__trace_puts);
1040 
1041 /**
1042  * __trace_bputs - write the pointer to a constant string into trace buffer
1043  * @ip:	   The address of the caller
1044  * @str:   The constant string to write to the buffer to
1045  */
1046 int __trace_bputs(unsigned long ip, const char *str)
1047 {
1048 	struct ring_buffer_event *event;
1049 	struct trace_buffer *buffer;
1050 	struct bputs_entry *entry;
1051 	unsigned int trace_ctx;
1052 	int size = sizeof(struct bputs_entry);
1053 	int ret = 0;
1054 
1055 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1056 		return 0;
1057 
1058 	if (unlikely(tracing_selftest_running || tracing_disabled))
1059 		return 0;
1060 
1061 	trace_ctx = tracing_gen_ctx();
1062 	buffer = global_trace.array_buffer.buffer;
1063 
1064 	ring_buffer_nest_start(buffer);
1065 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1066 					    trace_ctx);
1067 	if (!event)
1068 		goto out;
1069 
1070 	entry = ring_buffer_event_data(event);
1071 	entry->ip			= ip;
1072 	entry->str			= str;
1073 
1074 	__buffer_unlock_commit(buffer, event);
1075 	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1076 
1077 	ret = 1;
1078  out:
1079 	ring_buffer_nest_end(buffer);
1080 	return ret;
1081 }
1082 EXPORT_SYMBOL_GPL(__trace_bputs);
1083 
1084 #ifdef CONFIG_TRACER_SNAPSHOT
1085 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1086 					   void *cond_data)
1087 {
1088 	struct tracer *tracer = tr->current_trace;
1089 	unsigned long flags;
1090 
1091 	if (in_nmi()) {
1092 		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1093 		internal_trace_puts("*** snapshot is being ignored        ***\n");
1094 		return;
1095 	}
1096 
1097 	if (!tr->allocated_snapshot) {
1098 		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1099 		internal_trace_puts("*** stopping trace here!   ***\n");
1100 		tracing_off();
1101 		return;
1102 	}
1103 
1104 	/* Note, snapshot can not be used when the tracer uses it */
1105 	if (tracer->use_max_tr) {
1106 		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1107 		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1108 		return;
1109 	}
1110 
1111 	local_irq_save(flags);
1112 	update_max_tr(tr, current, smp_processor_id(), cond_data);
1113 	local_irq_restore(flags);
1114 }
1115 
1116 void tracing_snapshot_instance(struct trace_array *tr)
1117 {
1118 	tracing_snapshot_instance_cond(tr, NULL);
1119 }
1120 
1121 /**
1122  * tracing_snapshot - take a snapshot of the current buffer.
1123  *
1124  * This causes a swap between the snapshot buffer and the current live
1125  * tracing buffer. You can use this to take snapshots of the live
1126  * trace when some condition is triggered, but continue to trace.
1127  *
1128  * Note, make sure to allocate the snapshot with either
1129  * a tracing_snapshot_alloc(), or by doing it manually
1130  * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1131  *
1132  * If the snapshot buffer is not allocated, it will stop tracing.
1133  * Basically making a permanent snapshot.
1134  */
1135 void tracing_snapshot(void)
1136 {
1137 	struct trace_array *tr = &global_trace;
1138 
1139 	tracing_snapshot_instance(tr);
1140 }
1141 EXPORT_SYMBOL_GPL(tracing_snapshot);
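
/*
 * Example use (sketch): allocate the snapshot buffer once from a context
 * that may sleep, then take snapshots from a hot path when an anomaly is
 * detected ("latency" and "threshold" here are hypothetical):
 *
 *	tracing_alloc_snapshot();	during setup
 *	...
 *	if (latency > threshold)
 *		tracing_snapshot();	swaps the live buffer away
 */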
1142 
1143 /**
1144  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1145  * @tr:		The tracing instance to snapshot
1146  * @cond_data:	The data to be tested conditionally, and possibly saved
1147  *
1148  * This is the same as tracing_snapshot() except that the snapshot is
1149  * conditional - the snapshot will only happen if the
1150  * cond_snapshot.update() implementation receiving the cond_data
1151  * returns true, which means that the trace array's cond_snapshot
1152  * update() operation used the cond_data to determine whether the
1153  * snapshot should be taken, and if it was, presumably saved it along
1154  * with the snapshot.
1155  */
1156 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1157 {
1158 	tracing_snapshot_instance_cond(tr, cond_data);
1159 }
1160 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1161 
1162 /**
1163  * tracing_snapshot_cond_data - get the user data associated with a snapshot
1164  * @tr:		The tracing instance
1165  *
1166  * When the user enables a conditional snapshot using
1167  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1168  * with the snapshot.  This accessor is used to retrieve it.
1169  *
1170  * Should not be called from cond_snapshot.update(), since it takes
1171  * the tr->max_lock lock, which the code calling
1172  * cond_snapshot.update() has already taken.
1173  *
1174  * Returns the cond_data associated with the trace array's snapshot.
1175  */
1176 void *tracing_cond_snapshot_data(struct trace_array *tr)
1177 {
1178 	void *cond_data = NULL;
1179 
1180 	arch_spin_lock(&tr->max_lock);
1181 
1182 	if (tr->cond_snapshot)
1183 		cond_data = tr->cond_snapshot->cond_data;
1184 
1185 	arch_spin_unlock(&tr->max_lock);
1186 
1187 	return cond_data;
1188 }
1189 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1190 
1191 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1192 					struct array_buffer *size_buf, int cpu_id);
1193 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1194 
1195 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1196 {
1197 	int ret;
1198 
1199 	if (!tr->allocated_snapshot) {
1200 
1201 		/* allocate spare buffer */
1202 		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1203 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1204 		if (ret < 0)
1205 			return ret;
1206 
1207 		tr->allocated_snapshot = true;
1208 	}
1209 
1210 	return 0;
1211 }
1212 
1213 static void free_snapshot(struct trace_array *tr)
1214 {
1215 	/*
1216 	 * We don't free the ring buffer; instead, we resize it because
1217 	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1218 	 * we want to preserve it.
1219 	 */
1220 	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1221 	set_buffer_entries(&tr->max_buffer, 1);
1222 	tracing_reset_online_cpus(&tr->max_buffer);
1223 	tr->allocated_snapshot = false;
1224 }
1225 
1226 /**
1227  * tracing_alloc_snapshot - allocate snapshot buffer.
1228  *
1229  * This only allocates the snapshot buffer if it isn't already
1230  * allocated - it doesn't also take a snapshot.
1231  *
1232  * This is meant to be used in cases where the snapshot buffer needs
1233  * to be set up for events that can't sleep but need to be able to
1234  * trigger a snapshot.
1235  */
1236 int tracing_alloc_snapshot(void)
1237 {
1238 	struct trace_array *tr = &global_trace;
1239 	int ret;
1240 
1241 	ret = tracing_alloc_snapshot_instance(tr);
1242 	WARN_ON(ret < 0);
1243 
1244 	return ret;
1245 }
1246 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1247 
1248 /**
1249  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1250  *
1251  * This is similar to tracing_snapshot(), but it will allocate the
1252  * snapshot buffer if it isn't already allocated. Use this only
1253  * where it is safe to sleep, as the allocation may sleep.
1254  *
1255  * This causes a swap between the snapshot buffer and the current live
1256  * tracing buffer. You can use this to take snapshots of the live
1257  * trace when some condition is triggered, but continue to trace.
1258  */
1259 void tracing_snapshot_alloc(void)
1260 {
1261 	int ret;
1262 
1263 	ret = tracing_alloc_snapshot();
1264 	if (ret < 0)
1265 		return;
1266 
1267 	tracing_snapshot();
1268 }
1269 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1270 
1271 /**
1272  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1273  * @tr:		The tracing instance
1274  * @cond_data:	User data to associate with the snapshot
1275  * @update:	Implementation of the cond_snapshot update function
1276  *
1277  * Check whether the conditional snapshot for the given instance has
1278  * already been enabled, or if the current tracer is already using a
1279  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1280  * save the cond_data and update function inside.
1281  *
1282  * Returns 0 if successful, error otherwise.
1283  */
1284 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1285 				 cond_update_fn_t update)
1286 {
1287 	struct cond_snapshot *cond_snapshot;
1288 	int ret = 0;
1289 
1290 	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1291 	if (!cond_snapshot)
1292 		return -ENOMEM;
1293 
1294 	cond_snapshot->cond_data = cond_data;
1295 	cond_snapshot->update = update;
1296 
1297 	mutex_lock(&trace_types_lock);
1298 
1299 	ret = tracing_alloc_snapshot_instance(tr);
1300 	if (ret)
1301 		goto fail_unlock;
1302 
1303 	if (tr->current_trace->use_max_tr) {
1304 		ret = -EBUSY;
1305 		goto fail_unlock;
1306 	}
1307 
1308 	/*
1309 	 * The cond_snapshot can only change to NULL without the
1310 	 * trace_types_lock. We don't care if we race with it going
1311 	 * to NULL, but we want to make sure that it's not set to
1312 	 * something other than NULL when we get here, which we can
1313 	 * do safely with only holding the trace_types_lock and not
1314 	 * having to take the max_lock.
1315 	 */
1316 	if (tr->cond_snapshot) {
1317 		ret = -EBUSY;
1318 		goto fail_unlock;
1319 	}
1320 
1321 	arch_spin_lock(&tr->max_lock);
1322 	tr->cond_snapshot = cond_snapshot;
1323 	arch_spin_unlock(&tr->max_lock);
1324 
1325 	mutex_unlock(&trace_types_lock);
1326 
1327 	return ret;
1328 
1329  fail_unlock:
1330 	mutex_unlock(&trace_types_lock);
1331 	kfree(cond_snapshot);
1332 	return ret;
1333 }
1334 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
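
/*
 * Sketch of a conditional snapshot user (all names hypothetical). Note that
 * the cond_data passed to the update() callback is the one given to
 * tracing_snapshot_cond(), not the one registered here:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_sample *s = cond_data;
 *
 *		return s->value > s->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_private, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &sample);	snapshots only if
 *						my_update() returned true
 */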
1335 
1336 /**
1337  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1338  * @tr:		The tracing instance
1339  *
1340  * Check whether the conditional snapshot for the given instance is
1341  * enabled; if so, free the cond_snapshot associated with it,
1342  * otherwise return -EINVAL.
1343  *
1344  * Returns 0 if successful, error otherwise.
1345  */
1346 int tracing_snapshot_cond_disable(struct trace_array *tr)
1347 {
1348 	int ret = 0;
1349 
1350 	arch_spin_lock(&tr->max_lock);
1351 
1352 	if (!tr->cond_snapshot)
1353 		ret = -EINVAL;
1354 	else {
1355 		kfree(tr->cond_snapshot);
1356 		tr->cond_snapshot = NULL;
1357 	}
1358 
1359 	arch_spin_unlock(&tr->max_lock);
1360 
1361 	return ret;
1362 }
1363 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1364 #else
1365 void tracing_snapshot(void)
1366 {
1367 	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1368 }
1369 EXPORT_SYMBOL_GPL(tracing_snapshot);
1370 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1371 {
1372 	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1373 }
1374 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1375 int tracing_alloc_snapshot(void)
1376 {
1377 	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1378 	return -ENODEV;
1379 }
1380 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1381 void tracing_snapshot_alloc(void)
1382 {
1383 	/* Give warning */
1384 	tracing_snapshot();
1385 }
1386 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1387 void *tracing_cond_snapshot_data(struct trace_array *tr)
1388 {
1389 	return NULL;
1390 }
1391 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1392 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1393 {
1394 	return -ENODEV;
1395 }
1396 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1397 int tracing_snapshot_cond_disable(struct trace_array *tr)
1398 {
1399 	return false;
1400 }
1401 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1402 #endif /* CONFIG_TRACER_SNAPSHOT */
1403 
1404 void tracer_tracing_off(struct trace_array *tr)
1405 {
1406 	if (tr->array_buffer.buffer)
1407 		ring_buffer_record_off(tr->array_buffer.buffer);
1408 	/*
1409 	 * This flag is looked at when buffers haven't been allocated
1410 	 * yet, or by some tracers (like irqsoff) that just want to
1411 	 * know if the ring buffer has been disabled, but it can handle
1412 	 * races where it gets disabled while we still do a record.
1413 	 * As the check is in the fast path of the tracers, it is more
1414 	 * important to be fast than accurate.
1415 	 */
1416 	tr->buffer_disabled = 1;
1417 	/* Make the flag seen by readers */
1418 	smp_wmb();
1419 }
1420 
1421 /**
1422  * tracing_off - turn off tracing buffers
1423  *
1424  * This function stops the tracing buffers from recording data.
1425  * It does not disable any overhead the tracers themselves may
1426  * be causing. This function simply causes all recording to
1427  * the ring buffers to fail.
1428  */
1429 void tracing_off(void)
1430 {
1431 	tracer_tracing_off(&global_trace);
1432 }
1433 EXPORT_SYMBOL_GPL(tracing_off);
1434 
1435 void disable_trace_on_warning(void)
1436 {
1437 	if (__disable_trace_on_warning) {
1438 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1439 			"Disabling tracing due to warning\n");
1440 		tracing_off();
1441 	}
1442 }
1443 
1444 /**
1445  * tracer_tracing_is_on - show real state of ring buffer enabled
1446  * @tr : the trace array whose ring buffer state is queried
1447  *
1448  * Shows the real state of the ring buffer: whether it is enabled or not.
1449  */
1450 bool tracer_tracing_is_on(struct trace_array *tr)
1451 {
1452 	if (tr->array_buffer.buffer)
1453 		return ring_buffer_record_is_on(tr->array_buffer.buffer);
1454 	return !tr->buffer_disabled;
1455 }
1456 
1457 /**
1458  * tracing_is_on - show state of ring buffers enabled
1459  */
1460 int tracing_is_on(void)
1461 {
1462 	return tracer_tracing_is_on(&global_trace);
1463 }
1464 EXPORT_SYMBOL_GPL(tracing_is_on);
1465 
1466 static int __init set_buf_size(char *str)
1467 {
1468 	unsigned long buf_size;
1469 
1470 	if (!str)
1471 		return 0;
1472 	buf_size = memparse(str, &str);
1473 	/* nr_entries can not be zero */
1474 	if (buf_size == 0)
1475 		return 0;
1476 	trace_buf_size = buf_size;
1477 	return 1;
1478 }
1479 __setup("trace_buf_size=", set_buf_size);
1480 
1481 static int __init set_tracing_thresh(char *str)
1482 {
1483 	unsigned long threshold;
1484 	int ret;
1485 
1486 	if (!str)
1487 		return 0;
1488 	ret = kstrtoul(str, 0, &threshold);
1489 	if (ret < 0)
1490 		return 0;
1491 	tracing_thresh = threshold * 1000;
1492 	return 1;
1493 }
1494 __setup("tracing_thresh=", set_tracing_thresh);
1495 
1496 unsigned long nsecs_to_usecs(unsigned long nsecs)
1497 {
1498 	return nsecs / 1000;
1499 }
1500 
1501 /*
1502  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1503  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1504  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1505  * of strings in the order that the evals (enum) were defined.
1506  */
1507 #undef C
1508 #define C(a, b) b
1509 
1510 /* These must match the bit positions in trace_iterator_flags */
1511 static const char *trace_options[] = {
1512 	TRACE_FLAGS
1513 	NULL
1514 };
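
/*
 * For instance, an entry such as C(PRINT_PARENT, "print-parent") in
 * TRACE_FLAGS produces the TRACE_ITER_PRINT_PARENT bit in trace.h and the
 * string "print-parent" in the array above, keeping the bit positions and
 * the option names in sync from a single definition.
 */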
1515 
1516 static struct {
1517 	u64 (*func)(void);
1518 	const char *name;
1519 	int in_ns;		/* is this clock in nanoseconds? */
1520 } trace_clocks[] = {
1521 	{ trace_clock_local,		"local",	1 },
1522 	{ trace_clock_global,		"global",	1 },
1523 	{ trace_clock_counter,		"counter",	0 },
1524 	{ trace_clock_jiffies,		"uptime",	0 },
1525 	{ trace_clock,			"perf",		1 },
1526 	{ ktime_get_mono_fast_ns,	"mono",		1 },
1527 	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1528 	{ ktime_get_boot_fast_ns,	"boot",		1 },
1529 	ARCH_TRACE_CLOCKS
1530 };
1531 
1532 bool trace_clock_in_ns(struct trace_array *tr)
1533 {
1534 	if (trace_clocks[tr->clock_id].in_ns)
1535 		return true;
1536 
1537 	return false;
1538 }
1539 
1540 /*
1541  * trace_parser_get_init - gets the buffer for trace parser
1542  */
1543 int trace_parser_get_init(struct trace_parser *parser, int size)
1544 {
1545 	memset(parser, 0, sizeof(*parser));
1546 
1547 	parser->buffer = kmalloc(size, GFP_KERNEL);
1548 	if (!parser->buffer)
1549 		return 1;
1550 
1551 	parser->size = size;
1552 	return 0;
1553 }
1554 
1555 /*
1556  * trace_parser_put - frees the buffer for trace parser
1557  */
1558 void trace_parser_put(struct trace_parser *parser)
1559 {
1560 	kfree(parser->buffer);
1561 	parser->buffer = NULL;
1562 }
1563 
1564 /*
1565  * trace_get_user - reads the user input string separated by space
1566  * (matched by isspace(ch))
1567  *
1568  * For each string found the 'struct trace_parser' is updated,
1569  * and the function returns.
1570  *
1571  * Returns number of bytes read.
1572  *
1573  * See kernel/trace/trace.h for 'struct trace_parser' details.
1574  */
1575 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1576 	size_t cnt, loff_t *ppos)
1577 {
1578 	char ch;
1579 	size_t read = 0;
1580 	ssize_t ret;
1581 
1582 	if (!*ppos)
1583 		trace_parser_clear(parser);
1584 
1585 	ret = get_user(ch, ubuf++);
1586 	if (ret)
1587 		goto out;
1588 
1589 	read++;
1590 	cnt--;
1591 
1592 	/*
1593 	 * The parser is not finished with the last write,
1594 	 * continue reading the user input without skipping spaces.
1595 	 */
1596 	if (!parser->cont) {
1597 		/* skip white space */
1598 		while (cnt && isspace(ch)) {
1599 			ret = get_user(ch, ubuf++);
1600 			if (ret)
1601 				goto out;
1602 			read++;
1603 			cnt--;
1604 		}
1605 
1606 		parser->idx = 0;
1607 
1608 		/* only spaces were written */
1609 		if (isspace(ch) || !ch) {
1610 			*ppos += read;
1611 			ret = read;
1612 			goto out;
1613 		}
1614 	}
1615 
1616 	/* read the non-space input */
1617 	while (cnt && !isspace(ch) && ch) {
1618 		if (parser->idx < parser->size - 1)
1619 			parser->buffer[parser->idx++] = ch;
1620 		else {
1621 			ret = -EINVAL;
1622 			goto out;
1623 		}
1624 		ret = get_user(ch, ubuf++);
1625 		if (ret)
1626 			goto out;
1627 		read++;
1628 		cnt--;
1629 	}
1630 
1631 	/* We either got finished input or we have to wait for another call. */
1632 	if (isspace(ch) || !ch) {
1633 		parser->buffer[parser->idx] = 0;
1634 		parser->cont = false;
1635 	} else if (parser->idx < parser->size - 1) {
1636 		parser->cont = true;
1637 		parser->buffer[parser->idx++] = ch;
1638 		/* Make sure the parsed string always terminates with '\0'. */
1639 		parser->buffer[parser->idx] = 0;
1640 	} else {
1641 		ret = -EINVAL;
1642 		goto out;
1643 	}
1644 
1645 	*ppos += read;
1646 	ret = read;
1647 
1648 out:
1649 	return ret;
1650 }
1651 
1652 /* TODO add a seq_buf_to_buffer() */
1653 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1654 {
1655 	int len;
1656 
1657 	if (trace_seq_used(s) <= s->seq.readpos)
1658 		return -EBUSY;
1659 
1660 	len = trace_seq_used(s) - s->seq.readpos;
1661 	if (cnt > len)
1662 		cnt = len;
1663 	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1664 
1665 	s->seq.readpos += cnt;
1666 	return cnt;
1667 }
1668 
1669 unsigned long __read_mostly	tracing_thresh;
1670 static const struct file_operations tracing_max_lat_fops;
1671 
1672 #ifdef LATENCY_FS_NOTIFY
1673 
1674 static struct workqueue_struct *fsnotify_wq;
1675 
1676 static void latency_fsnotify_workfn(struct work_struct *work)
1677 {
1678 	struct trace_array *tr = container_of(work, struct trace_array,
1679 					      fsnotify_work);
1680 	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1681 }
1682 
1683 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1684 {
1685 	struct trace_array *tr = container_of(iwork, struct trace_array,
1686 					      fsnotify_irqwork);
1687 	queue_work(fsnotify_wq, &tr->fsnotify_work);
1688 }
1689 
1690 static void trace_create_maxlat_file(struct trace_array *tr,
1691 				     struct dentry *d_tracer)
1692 {
1693 	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1694 	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1695 	tr->d_max_latency = trace_create_file("tracing_max_latency",
1696 					      TRACE_MODE_WRITE,
1697 					      d_tracer, &tr->max_latency,
1698 					      &tracing_max_lat_fops);
1699 }
1700 
1701 __init static int latency_fsnotify_init(void)
1702 {
1703 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1704 				      WQ_UNBOUND | WQ_HIGHPRI, 0);
1705 	if (!fsnotify_wq) {
1706 		pr_err("Unable to allocate tr_max_lat_wq\n");
1707 		return -ENOMEM;
1708 	}
1709 	return 0;
1710 }
1711 
1712 late_initcall_sync(latency_fsnotify_init);
1713 
1714 void latency_fsnotify(struct trace_array *tr)
1715 {
1716 	if (!fsnotify_wq)
1717 		return;
1718 	/*
1719 	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1720 	 * possible that we are called from __schedule() or do_idle(), which
1721 	 * could cause a deadlock.
1722 	 */
1723 	irq_work_queue(&tr->fsnotify_irqwork);
1724 }
1725 
1726 #elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)	\
1727 	|| defined(CONFIG_OSNOISE_TRACER)
1728 
1729 #define trace_create_maxlat_file(tr, d_tracer)				\
1730 	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
1731 			  d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1732 
1733 #else
1734 #define trace_create_maxlat_file(tr, d_tracer)	 do { } while (0)
1735 #endif
1736 
1737 #ifdef CONFIG_TRACER_MAX_TRACE
1738 /*
1739  * Copy the new maximum trace into the separate maximum-trace
1740  * structure. (this way the maximum trace is permanently saved,
1741  * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1742  */
1743 static void
1744 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1745 {
1746 	struct array_buffer *trace_buf = &tr->array_buffer;
1747 	struct array_buffer *max_buf = &tr->max_buffer;
1748 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1749 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1750 
1751 	max_buf->cpu = cpu;
1752 	max_buf->time_start = data->preempt_timestamp;
1753 
1754 	max_data->saved_latency = tr->max_latency;
1755 	max_data->critical_start = data->critical_start;
1756 	max_data->critical_end = data->critical_end;
1757 
1758 	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1759 	max_data->pid = tsk->pid;
1760 	/*
1761 	 * If tsk == current, then use current_uid(), as that does not use
1762 	 * RCU. The irq tracer can be called out of RCU scope.
1763 	 */
1764 	if (tsk == current)
1765 		max_data->uid = current_uid();
1766 	else
1767 		max_data->uid = task_uid(tsk);
1768 
1769 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1770 	max_data->policy = tsk->policy;
1771 	max_data->rt_priority = tsk->rt_priority;
1772 
1773 	/* record this tasks comm */
1774 	tracing_record_cmdline(tsk);
1775 	latency_fsnotify(tr);
1776 }
1777 
1778 /**
1779  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1780  * @tr: tracer
1781  * @tsk: the task with the latency
1782  * @cpu: The cpu that initiated the trace.
1783  * @cond_data: User data associated with a conditional snapshot
1784  *
1785  * Flip the buffers between the @tr and the max_tr and record information
1786  * about which task was the cause of this latency.
1787  */
1788 void
1789 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1790 	      void *cond_data)
1791 {
1792 	if (tr->stop_count)
1793 		return;
1794 
1795 	WARN_ON_ONCE(!irqs_disabled());
1796 
1797 	if (!tr->allocated_snapshot) {
1798 		/* Only the nop tracer should hit this when disabling */
1799 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1800 		return;
1801 	}
1802 
1803 	arch_spin_lock(&tr->max_lock);
1804 
1805 	/* Inherit the recordable setting from array_buffer */
1806 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1807 		ring_buffer_record_on(tr->max_buffer.buffer);
1808 	else
1809 		ring_buffer_record_off(tr->max_buffer.buffer);
1810 
1811 #ifdef CONFIG_TRACER_SNAPSHOT
1812 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1813 		goto out_unlock;
1814 #endif
1815 	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1816 
1817 	__update_max_tr(tr, tsk, cpu);
1818 
1819  out_unlock:
1820 	arch_spin_unlock(&tr->max_lock);
1821 }
1822 
1823 /**
1824  * update_max_tr_single - only copy one trace over, and reset the rest
1825  * @tr: tracer
1826  * @tsk: task with the latency
1827  * @cpu: the cpu of the buffer to copy.
1828  *
1829  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1830  */
1831 void
1832 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1833 {
1834 	int ret;
1835 
1836 	if (tr->stop_count)
1837 		return;
1838 
1839 	WARN_ON_ONCE(!irqs_disabled());
1840 	if (!tr->allocated_snapshot) {
1841 		/* Only the nop tracer should hit this when disabling */
1842 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1843 		return;
1844 	}
1845 
1846 	arch_spin_lock(&tr->max_lock);
1847 
1848 	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1849 
1850 	if (ret == -EBUSY) {
1851 		/*
1852 		 * We failed to swap the buffer due to a commit taking
1853 		 * place on this CPU. We fail to record, but we reset
1854 		 * the max trace buffer (no one writes directly to it)
1855 		 * and flag that it failed.
1856 		 */
1857 		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1858 			"Failed to swap buffers due to commit in progress\n");
1859 	}
1860 
1861 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1862 
1863 	__update_max_tr(tr, tsk, cpu);
1864 	arch_spin_unlock(&tr->max_lock);
1865 }
1866 #endif /* CONFIG_TRACER_MAX_TRACE */
1867 
1868 static int wait_on_pipe(struct trace_iterator *iter, int full)
1869 {
1870 	/* Iterators are static, they should be filled or empty */
1871 	if (trace_buffer_iter(iter, iter->cpu_file))
1872 		return 0;
1873 
1874 	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1875 				full);
1876 }
1877 
1878 #ifdef CONFIG_FTRACE_STARTUP_TEST
1879 static bool selftests_can_run;
1880 
1881 struct trace_selftests {
1882 	struct list_head		list;
1883 	struct tracer			*type;
1884 };
1885 
1886 static LIST_HEAD(postponed_selftests);
1887 
1888 static int save_selftest(struct tracer *type)
1889 {
1890 	struct trace_selftests *selftest;
1891 
1892 	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1893 	if (!selftest)
1894 		return -ENOMEM;
1895 
1896 	selftest->type = type;
1897 	list_add(&selftest->list, &postponed_selftests);
1898 	return 0;
1899 }
1900 
1901 static int run_tracer_selftest(struct tracer *type)
1902 {
1903 	struct trace_array *tr = &global_trace;
1904 	struct tracer *saved_tracer = tr->current_trace;
1905 	int ret;
1906 
1907 	if (!type->selftest || tracing_selftest_disabled)
1908 		return 0;
1909 
1910 	/*
1911 	 * If a tracer registers early in boot up (before scheduling is
1912 	 * initialized and such), then do not run its selftests yet.
1913 	 * Instead, run it a little later in the boot process.
1914 	 */
1915 	if (!selftests_can_run)
1916 		return save_selftest(type);
1917 
1918 	if (!tracing_is_on()) {
1919 		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1920 			type->name);
1921 		return 0;
1922 	}
1923 
1924 	/*
1925 	 * Run a selftest on this tracer.
1926 	 * Here we reset the trace buffer, and set the current
1927 	 * tracer to be this tracer. The tracer can then run some
1928 	 * internal tracing to verify that everything is in order.
1929 	 * If we fail, we do not register this tracer.
1930 	 */
1931 	tracing_reset_online_cpus(&tr->array_buffer);
1932 
1933 	tr->current_trace = type;
1934 
1935 #ifdef CONFIG_TRACER_MAX_TRACE
1936 	if (type->use_max_tr) {
1937 		/* If we expanded the buffers, make sure the max is expanded too */
1938 		if (ring_buffer_expanded)
1939 			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1940 					   RING_BUFFER_ALL_CPUS);
1941 		tr->allocated_snapshot = true;
1942 	}
1943 #endif
1944 
1945 	/* the test is responsible for initializing and enabling */
1946 	pr_info("Testing tracer %s: ", type->name);
1947 	ret = type->selftest(type, tr);
1948 	/* the test is responsible for resetting too */
1949 	tr->current_trace = saved_tracer;
1950 	if (ret) {
1951 		printk(KERN_CONT "FAILED!\n");
1952 		/* Add the warning after printing 'FAILED' */
1953 		WARN_ON(1);
1954 		return -1;
1955 	}
1956 	/* Only reset on passing, to avoid touching corrupted buffers */
1957 	tracing_reset_online_cpus(&tr->array_buffer);
1958 
1959 #ifdef CONFIG_TRACER_MAX_TRACE
1960 	if (type->use_max_tr) {
1961 		tr->allocated_snapshot = false;
1962 
1963 		/* Shrink the max buffer again */
1964 		if (ring_buffer_expanded)
1965 			ring_buffer_resize(tr->max_buffer.buffer, 1,
1966 					   RING_BUFFER_ALL_CPUS);
1967 	}
1968 #endif
1969 
1970 	printk(KERN_CONT "PASSED\n");
1971 	return 0;
1972 }
1973 
1974 static __init int init_trace_selftests(void)
1975 {
1976 	struct trace_selftests *p, *n;
1977 	struct tracer *t, **last;
1978 	int ret;
1979 
1980 	selftests_can_run = true;
1981 
1982 	mutex_lock(&trace_types_lock);
1983 
1984 	if (list_empty(&postponed_selftests))
1985 		goto out;
1986 
1987 	pr_info("Running postponed tracer tests:\n");
1988 
1989 	tracing_selftest_running = true;
1990 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1991 		/* This loop can take minutes when sanitizers are enabled, so
1992 		 * let's make sure we allow RCU processing.
1993 		 */
1994 		cond_resched();
1995 		ret = run_tracer_selftest(p->type);
1996 		/* If the test fails, then warn and remove from available_tracers */
1997 		if (ret < 0) {
1998 			WARN(1, "tracer: %s failed selftest, disabling\n",
1999 			     p->type->name);
2000 			last = &trace_types;
2001 			for (t = trace_types; t; t = t->next) {
2002 				if (t == p->type) {
2003 					*last = t->next;
2004 					break;
2005 				}
2006 				last = &t->next;
2007 			}
2008 		}
2009 		list_del(&p->list);
2010 		kfree(p);
2011 	}
2012 	tracing_selftest_running = false;
2013 
2014  out:
2015 	mutex_unlock(&trace_types_lock);
2016 
2017 	return 0;
2018 }
2019 core_initcall(init_trace_selftests);
2020 #else
2021 static inline int run_tracer_selftest(struct tracer *type)
2022 {
2023 	return 0;
2024 }
2025 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2026 
2027 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2028 
2029 static void __init apply_trace_boot_options(void);
2030 
2031 /**
2032  * register_tracer - register a tracer with the ftrace system.
2033  * @type: the plugin for the tracer
2034  *
2035  * Register a new plugin tracer.
2036  */
2037 int __init register_tracer(struct tracer *type)
2038 {
2039 	struct tracer *t;
2040 	int ret = 0;
2041 
2042 	if (!type->name) {
2043 		pr_info("Tracer must have a name\n");
2044 		return -1;
2045 	}
2046 
2047 	if (strlen(type->name) >= MAX_TRACER_SIZE) {
2048 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2049 		return -1;
2050 	}
2051 
2052 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
2053 		pr_warn("Can not register tracer %s due to lockdown\n",
2054 			   type->name);
2055 		return -EPERM;
2056 	}
2057 
2058 	mutex_lock(&trace_types_lock);
2059 
2060 	tracing_selftest_running = true;
2061 
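	/* Reject the registration if a tracer with the same name already exists */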
2062 	for (t = trace_types; t; t = t->next) {
2063 		if (strcmp(type->name, t->name) == 0) {
2064 			/* already found */
2065 			pr_info("Tracer %s already registered\n",
2066 				type->name);
2067 			ret = -1;
2068 			goto out;
2069 		}
2070 	}
2071 
2072 	if (!type->set_flag)
2073 		type->set_flag = &dummy_set_flag;
2074 	if (!type->flags) {
2075 		/* allocate a dummy tracer_flags */
2076 		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2077 		if (!type->flags) {
2078 			ret = -ENOMEM;
2079 			goto out;
2080 		}
2081 		type->flags->val = 0;
2082 		type->flags->opts = dummy_tracer_opt;
2083 	} else
2084 		if (!type->flags->opts)
2085 			type->flags->opts = dummy_tracer_opt;
2086 
2087 	/* store the tracer for __set_tracer_option */
2088 	type->flags->trace = type;
2089 
2090 	ret = run_tracer_selftest(type);
2091 	if (ret < 0)
2092 		goto out;
2093 
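	/* Selftest passed (or was skipped): link the new tracer at the head of trace_types */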
2094 	type->next = trace_types;
2095 	trace_types = type;
2096 	add_tracer_options(&global_trace, type);
2097 
2098  out:
2099 	tracing_selftest_running = false;
2100 	mutex_unlock(&trace_types_lock);
2101 
2102 	if (ret || !default_bootup_tracer)
2103 		goto out_unlock;
2104 
2105 	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2106 		goto out_unlock;
2107 
2108 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2109 	/* Do we want this tracer to start on bootup? */
2110 	tracing_set_tracer(&global_trace, type->name);
2111 	default_bootup_tracer = NULL;
2112 
2113 	apply_trace_boot_options();
2114 
2115 	/* Disable other selftests, since running this tracer will break them. */
2116 	disable_tracing_selftest("running a tracer");
2117 
2118  out_unlock:
2119 	return ret;
2120 }
2121 
2122 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2123 {
2124 	struct trace_buffer *buffer = buf->buffer;
2125 
2126 	if (!buffer)
2127 		return;
2128 
2129 	ring_buffer_record_disable(buffer);
2130 
2131 	/* Make sure all commits have finished */
2132 	synchronize_rcu();
2133 	ring_buffer_reset_cpu(buffer, cpu);
2134 
2135 	ring_buffer_record_enable(buffer);
2136 }
2137 
2138 void tracing_reset_online_cpus(struct array_buffer *buf)
2139 {
2140 	struct trace_buffer *buffer = buf->buffer;
2141 
2142 	if (!buffer)
2143 		return;
2144 
2145 	ring_buffer_record_disable(buffer);
2146 
2147 	/* Make sure all commits have finished */
2148 	synchronize_rcu();
2149 
2150 	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2151 
2152 	ring_buffer_reset_online_cpus(buffer);
2153 
2154 	ring_buffer_record_enable(buffer);
2155 }
2156 
2157 /* Must have trace_types_lock held */
2158 void tracing_reset_all_online_cpus(void)
2159 {
2160 	struct trace_array *tr;
2161 
2162 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2163 		if (!tr->clear_trace)
2164 			continue;
2165 		tr->clear_trace = false;
2166 		tracing_reset_online_cpus(&tr->array_buffer);
2167 #ifdef CONFIG_TRACER_MAX_TRACE
2168 		tracing_reset_online_cpus(&tr->max_buffer);
2169 #endif
2170 	}
2171 }
2172 
2173 /*
2174  * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2175  * is the tgid last observed corresponding to pid=i.
2176  */
2177 static int *tgid_map;
2178 
2179 /* The maximum valid index into tgid_map. */
2180 static size_t tgid_map_max;
2181 
2182 #define SAVED_CMDLINES_DEFAULT 128
2183 #define NO_CMDLINE_MAP UINT_MAX
2184 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2185 struct saved_cmdlines_buffer {
2186 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2187 	unsigned *map_cmdline_to_pid;
2188 	unsigned cmdline_num;
2189 	int cmdline_idx;
2190 	char *saved_cmdlines;
2191 };
2192 static struct saved_cmdlines_buffer *savedcmd;
2193 
2194 static inline char *get_saved_cmdlines(int idx)
2195 {
2196 	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2197 }
2198 
2199 static inline void set_cmdline(int idx, const char *cmdline)
2200 {
2201 	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2202 }
2203 
2204 static int allocate_cmdlines_buffer(unsigned int val,
2205 				    struct saved_cmdlines_buffer *s)
2206 {
2207 	s->map_cmdline_to_pid = kmalloc_array(val,
2208 					      sizeof(*s->map_cmdline_to_pid),
2209 					      GFP_KERNEL);
2210 	if (!s->map_cmdline_to_pid)
2211 		return -ENOMEM;
2212 
2213 	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2214 	if (!s->saved_cmdlines) {
2215 		kfree(s->map_cmdline_to_pid);
2216 		return -ENOMEM;
2217 	}
2218 
2219 	s->cmdline_idx = 0;
2220 	s->cmdline_num = val;
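	/* memset() uses only the low byte (0xff), which leaves every slot equal to NO_CMDLINE_MAP (UINT_MAX) */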
2221 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2222 	       sizeof(s->map_pid_to_cmdline));
2223 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2224 	       val * sizeof(*s->map_cmdline_to_pid));
2225 
2226 	return 0;
2227 }
2228 
2229 static int trace_create_savedcmd(void)
2230 {
2231 	int ret;
2232 
2233 	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2234 	if (!savedcmd)
2235 		return -ENOMEM;
2236 
2237 	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2238 	if (ret < 0) {
2239 		kfree(savedcmd);
2240 		savedcmd = NULL;
2241 		return -ENOMEM;
2242 	}
2243 
2244 	return 0;
2245 }
2246 
2247 int is_tracing_stopped(void)
2248 {
2249 	return global_trace.stop_count;
2250 }
2251 
2252 /**
2253  * tracing_start - quick start of the tracer
2254  *
2255  * If tracing is enabled but was stopped by tracing_stop,
2256  * this will start the tracer back up.
2257  */
2258 void tracing_start(void)
2259 {
2260 	struct trace_buffer *buffer;
2261 	unsigned long flags;
2262 
2263 	if (tracing_disabled)
2264 		return;
2265 
2266 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
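	/* Nested start: recording is only re-enabled when the outermost stop is undone */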
2267 	if (--global_trace.stop_count) {
2268 		if (global_trace.stop_count < 0) {
2269 			/* Someone screwed up their debugging */
2270 			WARN_ON_ONCE(1);
2271 			global_trace.stop_count = 0;
2272 		}
2273 		goto out;
2274 	}
2275 
2276 	/* Prevent the buffers from switching */
2277 	arch_spin_lock(&global_trace.max_lock);
2278 
2279 	buffer = global_trace.array_buffer.buffer;
2280 	if (buffer)
2281 		ring_buffer_record_enable(buffer);
2282 
2283 #ifdef CONFIG_TRACER_MAX_TRACE
2284 	buffer = global_trace.max_buffer.buffer;
2285 	if (buffer)
2286 		ring_buffer_record_enable(buffer);
2287 #endif
2288 
2289 	arch_spin_unlock(&global_trace.max_lock);
2290 
2291  out:
2292 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2293 }
2294 
2295 static void tracing_start_tr(struct trace_array *tr)
2296 {
2297 	struct trace_buffer *buffer;
2298 	unsigned long flags;
2299 
2300 	if (tracing_disabled)
2301 		return;
2302 
2303 	/* If global, we need to also start the max tracer */
2304 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2305 		return tracing_start();
2306 
2307 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2308 
2309 	if (--tr->stop_count) {
2310 		if (tr->stop_count < 0) {
2311 			/* Someone screwed up their debugging */
2312 			WARN_ON_ONCE(1);
2313 			tr->stop_count = 0;
2314 		}
2315 		goto out;
2316 	}
2317 
2318 	buffer = tr->array_buffer.buffer;
2319 	if (buffer)
2320 		ring_buffer_record_enable(buffer);
2321 
2322  out:
2323 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2324 }
2325 
2326 /**
2327  * tracing_stop - quick stop of the tracer
2328  *
2329  * Lightweight way to stop tracing. Use in conjunction with
2330  * tracing_start.
2331  */
2332 void tracing_stop(void)
2333 {
2334 	struct trace_buffer *buffer;
2335 	unsigned long flags;
2336 
2337 	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
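	/* Nested stop: only the first stop actually disables recording */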
2338 	if (global_trace.stop_count++)
2339 		goto out;
2340 
2341 	/* Prevent the buffers from switching */
2342 	arch_spin_lock(&global_trace.max_lock);
2343 
2344 	buffer = global_trace.array_buffer.buffer;
2345 	if (buffer)
2346 		ring_buffer_record_disable(buffer);
2347 
2348 #ifdef CONFIG_TRACER_MAX_TRACE
2349 	buffer = global_trace.max_buffer.buffer;
2350 	if (buffer)
2351 		ring_buffer_record_disable(buffer);
2352 #endif
2353 
2354 	arch_spin_unlock(&global_trace.max_lock);
2355 
2356  out:
2357 	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2358 }
2359 
2360 static void tracing_stop_tr(struct trace_array *tr)
2361 {
2362 	struct trace_buffer *buffer;
2363 	unsigned long flags;
2364 
2365 	/* If global, we need to also stop the max tracer */
2366 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2367 		return tracing_stop();
2368 
2369 	raw_spin_lock_irqsave(&tr->start_lock, flags);
2370 	if (tr->stop_count++)
2371 		goto out;
2372 
2373 	buffer = tr->array_buffer.buffer;
2374 	if (buffer)
2375 		ring_buffer_record_disable(buffer);
2376 
2377  out:
2378 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2379 }
2380 
2381 static int trace_save_cmdline(struct task_struct *tsk)
2382 {
2383 	unsigned tpid, idx;
2384 
2385 	/* treat recording of idle task as a success */
2386 	if (!tsk->pid)
2387 		return 1;
2388 
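	/* Index by the low bits of the PID; unrelated PIDs may map to the same slot */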
2389 	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2390 
2391 	/*
2392 	 * It's not the end of the world if we don't get
2393 	 * the lock, but we also don't want to spin,
2394 	 * nor do we want to disable interrupts,
2395 	 * so if we miss here, then better luck next time.
2396 	 */
2397 	if (!arch_spin_trylock(&trace_cmdline_lock))
2398 		return 0;
2399 
2400 	idx = savedcmd->map_pid_to_cmdline[tpid];
2401 	if (idx == NO_CMDLINE_MAP) {
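		/* No slot for this PID yet: claim the next slot in the circular cmdline buffer */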
2402 		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2403 
2404 		savedcmd->map_pid_to_cmdline[tpid] = idx;
2405 		savedcmd->cmdline_idx = idx;
2406 	}
2407 
2408 	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2409 	set_cmdline(idx, tsk->comm);
2410 
2411 	arch_spin_unlock(&trace_cmdline_lock);
2412 
2413 	return 1;
2414 }
2415 
2416 static void __trace_find_cmdline(int pid, char comm[])
2417 {
2418 	unsigned map;
2419 	int tpid;
2420 
2421 	if (!pid) {
2422 		strcpy(comm, "<idle>");
2423 		return;
2424 	}
2425 
2426 	if (WARN_ON_ONCE(pid < 0)) {
2427 		strcpy(comm, "<XXX>");
2428 		return;
2429 	}
2430 
2431 	tpid = pid & (PID_MAX_DEFAULT - 1);
2432 	map = savedcmd->map_pid_to_cmdline[tpid];
2433 	if (map != NO_CMDLINE_MAP) {
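		/* The slot may have been reused by a colliding PID; check that it still belongs to this pid */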
2434 		tpid = savedcmd->map_cmdline_to_pid[map];
2435 		if (tpid == pid) {
2436 			strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2437 			return;
2438 		}
2439 	}
2440 	strcpy(comm, "<...>");
2441 }
2442 
2443 void trace_find_cmdline(int pid, char comm[])
2444 {
2445 	preempt_disable();
2446 	arch_spin_lock(&trace_cmdline_lock);
2447 
2448 	__trace_find_cmdline(pid, comm);
2449 
2450 	arch_spin_unlock(&trace_cmdline_lock);
2451 	preempt_enable();
2452 }
2453 
2454 static int *trace_find_tgid_ptr(int pid)
2455 {
2456 	/*
2457 	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2458 	 * if we observe a non-NULL tgid_map then we also observe the correct
2459 	 * tgid_map_max.
2460 	 */
2461 	int *map = smp_load_acquire(&tgid_map);
2462 
2463 	if (unlikely(!map || pid > tgid_map_max))
2464 		return NULL;
2465 
2466 	return &map[pid];
2467 }
2468 
2469 int trace_find_tgid(int pid)
2470 {
2471 	int *ptr = trace_find_tgid_ptr(pid);
2472 
2473 	return ptr ? *ptr : 0;
2474 }
2475 
2476 static int trace_save_tgid(struct task_struct *tsk)
2477 {
2478 	int *ptr;
2479 
2480 	/* treat recording of idle task as a success */
2481 	if (!tsk->pid)
2482 		return 1;
2483 
2484 	ptr = trace_find_tgid_ptr(tsk->pid);
2485 	if (!ptr)
2486 		return 0;
2487 
2488 	*ptr = tsk->tgid;
2489 	return 1;
2490 }
2491 
2492 static bool tracing_record_taskinfo_skip(int flags)
2493 {
2494 	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2495 		return true;
2496 	if (!__this_cpu_read(trace_taskinfo_save))
2497 		return true;
2498 	return false;
2499 }
2500 
2501 /**
2502  * tracing_record_taskinfo - record the task info of a task
2503  *
2504  * @task:  task to record
2505  * @flags: TRACE_RECORD_CMDLINE for recording comm
2506  *         TRACE_RECORD_TGID for recording tgid
2507  */
2508 void tracing_record_taskinfo(struct task_struct *task, int flags)
2509 {
2510 	bool done;
2511 
2512 	if (tracing_record_taskinfo_skip(flags))
2513 		return;
2514 
2515 	/*
2516 	 * Record as much task information as possible. If some fail, continue
2517 	 * to try to record the others.
2518 	 */
2519 	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2520 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2521 
2522 	/* If recording any information failed, retry again soon. */
2523 	if (!done)
2524 		return;
2525 
2526 	__this_cpu_write(trace_taskinfo_save, false);
2527 }
2528 
2529 /**
2530  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2531  *
2532  * @prev: previous task during sched_switch
2533  * @next: next task during sched_switch
2534  * @flags: TRACE_RECORD_CMDLINE for recording comm
2535  *         TRACE_RECORD_TGID for recording tgid
2536  */
2537 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2538 					  struct task_struct *next, int flags)
2539 {
2540 	bool done;
2541 
2542 	if (tracing_record_taskinfo_skip(flags))
2543 		return;
2544 
2545 	/*
2546 	 * Record as much task information as possible. If some fail, continue
2547 	 * to try to record the others.
2548 	 */
2549 	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2550 	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2551 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2552 	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2553 
2554 	/* If recording any information failed, retry again soon. */
2555 	if (!done)
2556 		return;
2557 
2558 	__this_cpu_write(trace_taskinfo_save, false);
2559 }
2560 
2561 /* Helpers to record a specific task information */
2562 void tracing_record_cmdline(struct task_struct *task)
2563 {
2564 	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2565 }
2566 
2567 void tracing_record_tgid(struct task_struct *task)
2568 {
2569 	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2570 }
2571 
2572 /*
2573  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2574  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2575  * simplifies those functions and keeps them in sync.
2576  */
2577 enum print_line_t trace_handle_return(struct trace_seq *s)
2578 {
2579 	return trace_seq_has_overflowed(s) ?
2580 		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2581 }
2582 EXPORT_SYMBOL_GPL(trace_handle_return);
2583 
2584 static unsigned short migration_disable_value(void)
2585 {
2586 #if defined(CONFIG_SMP)
2587 	return current->migration_disabled;
2588 #else
2589 	return 0;
2590 #endif
2591 }
2592 
2593 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2594 {
2595 	unsigned int trace_flags = irqs_status;
2596 	unsigned int pc;
2597 
2598 	pc = preempt_count();
2599 
2600 	if (pc & NMI_MASK)
2601 		trace_flags |= TRACE_FLAG_NMI;
2602 	if (pc & HARDIRQ_MASK)
2603 		trace_flags |= TRACE_FLAG_HARDIRQ;
2604 	if (in_serving_softirq())
2605 		trace_flags |= TRACE_FLAG_SOFTIRQ;
2606 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2607 		trace_flags |= TRACE_FLAG_BH_OFF;
2608 
2609 	if (tif_need_resched())
2610 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
2611 	if (test_preempt_need_resched())
2612 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
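	/*
	 * Pack the context: the trace flags in the upper 16 bits, the
	 * preempt count (capped at 0xf) in bits 0-3, and the
	 * migration-disable depth (capped at 0xf) in bits 4-7.
	 */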
2613 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2614 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
2615 }
2616 
2617 struct ring_buffer_event *
2618 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2619 			  int type,
2620 			  unsigned long len,
2621 			  unsigned int trace_ctx)
2622 {
2623 	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2624 }
2625 
2626 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2627 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2628 static int trace_buffered_event_ref;
2629 
2630 /**
2631  * trace_buffered_event_enable - enable buffering events
2632  *
2633  * When events are being filtered, it is quicker to use a temporary
2634  * buffer to write the event data into if there's a likely chance
2635  * that it will not be committed. The discard of the ring buffer
2636  * is not as fast as committing, and is much slower than copying
2637  * a commit.
2638  *
2639  * When an event is to be filtered, allocate per cpu buffers to
2640  * write the event data into, and if the event is filtered and discarded
2641  * it is simply dropped, otherwise, the entire data is to be committed
2642  * in one shot.
2643  */
2644 void trace_buffered_event_enable(void)
2645 {
2646 	struct ring_buffer_event *event;
2647 	struct page *page;
2648 	int cpu;
2649 
2650 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2651 
2652 	if (trace_buffered_event_ref++)
2653 		return;
2654 
2655 	for_each_tracing_cpu(cpu) {
2656 		page = alloc_pages_node(cpu_to_node(cpu),
2657 					GFP_KERNEL | __GFP_NORETRY, 0);
2658 		if (!page)
2659 			goto failed;
2660 
2661 		event = page_address(page);
2662 		memset(event, 0, sizeof(*event));
2663 
2664 		per_cpu(trace_buffered_event, cpu) = event;
2665 
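		/* Paranoid check: on this CPU the this_cpu view must match the per_cpu slot written above */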
2666 		preempt_disable();
2667 		if (cpu == smp_processor_id() &&
2668 		    __this_cpu_read(trace_buffered_event) !=
2669 		    per_cpu(trace_buffered_event, cpu))
2670 			WARN_ON_ONCE(1);
2671 		preempt_enable();
2672 	}
2673 
2674 	return;
2675  failed:
2676 	trace_buffered_event_disable();
2677 }
2678 
2679 static void enable_trace_buffered_event(void *data)
2680 {
2681 	/* Probably not needed, but do it anyway */
2682 	smp_rmb();
2683 	this_cpu_dec(trace_buffered_event_cnt);
2684 }
2685 
2686 static void disable_trace_buffered_event(void *data)
2687 {
2688 	this_cpu_inc(trace_buffered_event_cnt);
2689 }
2690 
2691 /**
2692  * trace_buffered_event_disable - disable buffering events
2693  *
2694  * When a filter is removed, it is faster to not use the buffered
2695  * events, and to commit directly into the ring buffer. Free up
2696  * the temp buffers when there are no more users. This requires
2697  * special synchronization with current events.
2698  */
2699 void trace_buffered_event_disable(void)
2700 {
2701 	int cpu;
2702 
2703 	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2704 
2705 	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2706 		return;
2707 
2708 	if (--trace_buffered_event_ref)
2709 		return;
2710 
2711 	preempt_disable();
2712 	/* For each CPU, set the buffer as used. */
2713 	smp_call_function_many(tracing_buffer_mask,
2714 			       disable_trace_buffered_event, NULL, 1);
2715 	preempt_enable();
2716 
2717 	/* Wait for all current users to finish */
2718 	synchronize_rcu();
2719 
2720 	for_each_tracing_cpu(cpu) {
2721 		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2722 		per_cpu(trace_buffered_event, cpu) = NULL;
2723 	}
2724 	/*
2725 	 * Make sure trace_buffered_event is NULL before clearing
2726 	 * trace_buffered_event_cnt.
2727 	 */
2728 	smp_wmb();
2729 
2730 	preempt_disable();
2731 	/* Do the work on each cpu */
2732 	smp_call_function_many(tracing_buffer_mask,
2733 			       enable_trace_buffered_event, NULL, 1);
2734 	preempt_enable();
2735 }
2736 
2737 static struct trace_buffer *temp_buffer;
2738 
2739 struct ring_buffer_event *
2740 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2741 			  struct trace_event_file *trace_file,
2742 			  int type, unsigned long len,
2743 			  unsigned int trace_ctx)
2744 {
2745 	struct ring_buffer_event *entry;
2746 	struct trace_array *tr = trace_file->tr;
2747 	int val;
2748 
2749 	*current_rb = tr->array_buffer.buffer;
2750 
2751 	if (!tr->no_filter_buffering_ref &&
2752 	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2753 		preempt_disable_notrace();
2754 		/*
2755 		 * Filtering is on, so try to use the per cpu buffer first.
2756 		 * This buffer will simulate a ring_buffer_event,
2757 		 * where the type_len is zero and the array[0] will
2758 		 * hold the full length.
2759 		 * (see include/linux/ring_buffer.h for details on
2760 		 *  how the ring_buffer_event is structured).
2761 		 *
2762 		 * Using a temp buffer during filtering and copying it
2763 		 * on a matched filter is quicker than writing directly
2764 		 * into the ring buffer and then discarding it when
2765 		 * it doesn't match. That is because the discard
2766 		 * requires several atomic operations to get right.
2767 		 * Copying on a match and doing nothing on a failed match
2768 		 * is still quicker than writing straight into the ring buffer
2769 		 * and having to discard out of it on a failed match.
2770 		 */
2771 		if ((entry = __this_cpu_read(trace_buffered_event))) {
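			/* The temp buffer is one page; leave room for the event header and the array[0] length word */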
2772 			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2773 
2774 			val = this_cpu_inc_return(trace_buffered_event_cnt);
2775 
2776 			/*
2777 			 * Preemption is disabled, but interrupts and NMIs
2778 			 * can still come in now. If that happens after
2779 			 * the above increment, then it will have to go
2780 			 * back to the old method of allocating the event
2781 			 * on the ring buffer, and if the filter fails, it
2782 			 * will have to call ring_buffer_discard_commit()
2783 			 * to remove it.
2784 			 *
2785 			 * Need to also check the unlikely case that the
2786 			 * length is bigger than the temp buffer size.
2787 			 * If that happens, then the reserve is pretty much
2788 			 * guaranteed to fail, as the ring buffer currently
2789 			 * only allows events less than a page. But that may
2790 			 * change in the future, so let the ring buffer reserve
2791 			 * handle the failure in that case.
2792 			 */
2793 			if (val == 1 && likely(len <= max_len)) {
2794 				trace_event_setup(entry, type, trace_ctx);
2795 				entry->array[0] = len;
2796 				/* Return with preemption disabled */
2797 				return entry;
2798 			}
2799 			this_cpu_dec(trace_buffered_event_cnt);
2800 		}
2801 		/* __trace_buffer_lock_reserve() disables preemption */
2802 		preempt_enable_notrace();
2803 	}
2804 
2805 	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2806 					    trace_ctx);
2807 	/*
2808 	 * If tracing is off, but we have triggers enabled
2809 	 * we still need to look at the event data. Use the temp_buffer
2810 	 * to store the trace event for the trigger to use. It's recursive
2811 	 * safe and will not be recorded anywhere.
2812 	 */
2813 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2814 		*current_rb = temp_buffer;
2815 		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2816 						    trace_ctx);
2817 	}
2818 	return entry;
2819 }
2820 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2821 
2822 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2823 static DEFINE_MUTEX(tracepoint_printk_mutex);
2824 
2825 static void output_printk(struct trace_event_buffer *fbuffer)
2826 {
2827 	struct trace_event_call *event_call;
2828 	struct trace_event_file *file;
2829 	struct trace_event *event;
2830 	unsigned long flags;
2831 	struct trace_iterator *iter = tracepoint_print_iter;
2832 
2833 	/* We should never get here if iter is NULL */
2834 	if (WARN_ON_ONCE(!iter))
2835 		return;
2836 
2837 	event_call = fbuffer->trace_file->event_call;
2838 	if (!event_call || !event_call->event.funcs ||
2839 	    !event_call->event.funcs->trace)
2840 		return;
2841 
2842 	file = fbuffer->trace_file;
2843 	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2844 	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2845 	     !filter_match_preds(file->filter, fbuffer->entry)))
2846 		return;
2847 
2848 	event = &fbuffer->trace_file->event_call->event;
2849 
2850 	spin_lock_irqsave(&tracepoint_iter_lock, flags);
2851 	trace_seq_init(&iter->seq);
2852 	iter->ent = fbuffer->entry;
2853 	event_call->event.funcs->trace(iter, 0, event);
2854 	trace_seq_putc(&iter->seq, 0);
2855 	printk("%s", iter->seq.buffer);
2856 
2857 	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2858 }
2859 
2860 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2861 			     void *buffer, size_t *lenp,
2862 			     loff_t *ppos)
2863 {
2864 	int save_tracepoint_printk;
2865 	int ret;
2866 
2867 	mutex_lock(&tracepoint_printk_mutex);
2868 	save_tracepoint_printk = tracepoint_printk;
2869 
2870 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2871 
2872 	/*
2873 	 * This will force an early exit, as tracepoint_printk
2874 	 * is always zero when tracepoint_print_iter is not allocated.
2875 	 */
2876 	if (!tracepoint_print_iter)
2877 		tracepoint_printk = 0;
2878 
2879 	if (save_tracepoint_printk == tracepoint_printk)
2880 		goto out;
2881 
2882 	if (tracepoint_printk)
2883 		static_key_enable(&tracepoint_printk_key.key);
2884 	else
2885 		static_key_disable(&tracepoint_printk_key.key);
2886 
2887  out:
2888 	mutex_unlock(&tracepoint_printk_mutex);
2889 
2890 	return ret;
2891 }
2892 
2893 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2894 {
2895 	enum event_trigger_type tt = ETT_NONE;
2896 	struct trace_event_file *file = fbuffer->trace_file;
2897 
2898 	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2899 			fbuffer->entry, &tt))
2900 		goto discard;
2901 
2902 	if (static_key_false(&tracepoint_printk_key.key))
2903 		output_printk(fbuffer);
2904 
2905 	if (static_branch_unlikely(&trace_event_exports_enabled))
2906 		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2907 
2908 	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2909 			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2910 
2911 discard:
2912 	if (tt)
2913 		event_triggers_post_call(file, tt);
2914 
2915 }
2916 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2917 
2918 /*
2919  * Skip 3:
2920  *
2921  *   trace_buffer_unlock_commit_regs()
2922  *   trace_event_buffer_commit()
2923  *   trace_event_raw_event_xxx()
2924  */
2925 # define STACK_SKIP 3
2926 
2927 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2928 				     struct trace_buffer *buffer,
2929 				     struct ring_buffer_event *event,
2930 				     unsigned int trace_ctx,
2931 				     struct pt_regs *regs)
2932 {
2933 	__buffer_unlock_commit(buffer, event);
2934 
2935 	/*
2936 	 * If regs is not set, then skip the necessary functions.
2937 	 * Note, we can still get here via blktrace, wakeup tracer
2938 	 * and mmiotrace, but that's ok if they lose a function or
2939 	 * two. They are not that meaningful.
2940 	 */
2941 	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2942 	ftrace_trace_userstack(tr, buffer, trace_ctx);
2943 }
2944 
2945 /*
2946  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2947  */
2948 void
2949 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2950 				   struct ring_buffer_event *event)
2951 {
2952 	__buffer_unlock_commit(buffer, event);
2953 }
2954 
2955 void
2956 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2957 	       parent_ip, unsigned int trace_ctx)
2958 {
2959 	struct trace_event_call *call = &event_function;
2960 	struct trace_buffer *buffer = tr->array_buffer.buffer;
2961 	struct ring_buffer_event *event;
2962 	struct ftrace_entry *entry;
2963 
2964 	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2965 					    trace_ctx);
2966 	if (!event)
2967 		return;
2968 	entry	= ring_buffer_event_data(event);
2969 	entry->ip			= ip;
2970 	entry->parent_ip		= parent_ip;
2971 
2972 	if (!call_filter_check_discard(call, entry, buffer, event)) {
2973 		if (static_branch_unlikely(&trace_function_exports_enabled))
2974 			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2975 		__buffer_unlock_commit(buffer, event);
2976 	}
2977 }
2978 
2979 #ifdef CONFIG_STACKTRACE
2980 
2981 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2982 #define FTRACE_KSTACK_NESTING	4
2983 
2984 #define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
2985 
2986 struct ftrace_stack {
2987 	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2988 };
2989 
2990 
2991 struct ftrace_stacks {
2992 	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2993 };
2994 
2995 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2996 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2997 
2998 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2999 				 unsigned int trace_ctx,
3000 				 int skip, struct pt_regs *regs)
3001 {
3002 	struct trace_event_call *call = &event_kernel_stack;
3003 	struct ring_buffer_event *event;
3004 	unsigned int size, nr_entries;
3005 	struct ftrace_stack *fstack;
3006 	struct stack_entry *entry;
3007 	int stackidx;
3008 
3009 	/*
3010 	 * Add one for this function and the call to stack_trace_save().
3011 	 * If regs is set, then these functions will not be in the way.
3012 	 */
3013 #ifndef CONFIG_UNWINDER_ORC
3014 	if (!regs)
3015 		skip++;
3016 #endif
3017 
3018 	preempt_disable_notrace();
3019 
3020 	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3021 
3022 	/* This should never happen. If it does, yell once and skip */
3023 	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3024 		goto out;
3025 
3026 	/*
3027 	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3028 	 * interrupt will either see the value pre-increment or
3029 	 * post-increment. If the interrupt happens pre-increment, it will
3030 	 * have restored the counter when it returns. We just need a barrier to
3031 	 * keep gcc from moving things around.
3032 	 */
3033 	barrier();
3034 
3035 	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3036 	size = ARRAY_SIZE(fstack->calls);
3037 
3038 	if (regs) {
3039 		nr_entries = stack_trace_save_regs(regs, fstack->calls,
3040 						   size, skip);
3041 	} else {
3042 		nr_entries = stack_trace_save(fstack->calls, size, skip);
3043 	}
3044 
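	/*
	 * Reserve only what was captured: drop the static caller[] array
	 * from the entry size and append nr_entries saved addresses.
	 */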
3045 	size = nr_entries * sizeof(unsigned long);
3046 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3047 				    (sizeof(*entry) - sizeof(entry->caller)) + size,
3048 				    trace_ctx);
3049 	if (!event)
3050 		goto out;
3051 	entry = ring_buffer_event_data(event);
3052 
3053 	memcpy(&entry->caller, fstack->calls, size);
3054 	entry->size = nr_entries;
3055 
3056 	if (!call_filter_check_discard(call, entry, buffer, event))
3057 		__buffer_unlock_commit(buffer, event);
3058 
3059  out:
3060 	/* Again, don't let gcc optimize things here */
3061 	barrier();
3062 	__this_cpu_dec(ftrace_stack_reserve);
3063 	preempt_enable_notrace();
3064 
3065 }
3066 
3067 static inline void ftrace_trace_stack(struct trace_array *tr,
3068 				      struct trace_buffer *buffer,
3069 				      unsigned int trace_ctx,
3070 				      int skip, struct pt_regs *regs)
3071 {
3072 	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3073 		return;
3074 
3075 	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3076 }
3077 
3078 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3079 		   int skip)
3080 {
3081 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3082 
3083 	if (rcu_is_watching()) {
3084 		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3085 		return;
3086 	}
3087 
3088 	/*
3089 	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3090 	 * but if the above rcu_is_watching() failed, then the NMI
3091 	 * triggered someplace critical, and rcu_irq_enter() should
3092 	 * not be called from NMI.
3093 	 */
3094 	if (unlikely(in_nmi()))
3095 		return;
3096 
3097 	rcu_irq_enter_irqson();
3098 	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3099 	rcu_irq_exit_irqson();
3100 }
3101 
3102 /**
3103  * trace_dump_stack - record a stack back trace in the trace buffer
3104  * @skip: Number of functions to skip (helper handlers)
3105  */
3106 void trace_dump_stack(int skip)
3107 {
3108 	if (tracing_disabled || tracing_selftest_running)
3109 		return;
3110 
3111 #ifndef CONFIG_UNWINDER_ORC
3112 	/* Skip 1 to skip this function. */
3113 	skip++;
3114 #endif
3115 	__ftrace_trace_stack(global_trace.array_buffer.buffer,
3116 			     tracing_gen_ctx(), skip, NULL);
3117 }
3118 EXPORT_SYMBOL_GPL(trace_dump_stack);
3119 
3120 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3121 static DEFINE_PER_CPU(int, user_stack_count);
3122 
3123 static void
3124 ftrace_trace_userstack(struct trace_array *tr,
3125 		       struct trace_buffer *buffer, unsigned int trace_ctx)
3126 {
3127 	struct trace_event_call *call = &event_user_stack;
3128 	struct ring_buffer_event *event;
3129 	struct userstack_entry *entry;
3130 
3131 	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3132 		return;
3133 
3134 	/*
3135 	 * NMIs cannot handle page faults, even with fixups.
3136 	 * Saving the user stack can (and often does) fault.
3137 	 */
3138 	if (unlikely(in_nmi()))
3139 		return;
3140 
3141 	/*
3142 	 * Prevent recursion, since the user stack tracing may
3143 	 * trigger other kernel events.
3144 	 */
3145 	preempt_disable();
3146 	if (__this_cpu_read(user_stack_count))
3147 		goto out;
3148 
3149 	__this_cpu_inc(user_stack_count);
3150 
3151 	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3152 					    sizeof(*entry), trace_ctx);
3153 	if (!event)
3154 		goto out_drop_count;
3155 	entry	= ring_buffer_event_data(event);
3156 
3157 	entry->tgid		= current->tgid;
3158 	memset(&entry->caller, 0, sizeof(entry->caller));
3159 
3160 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3161 	if (!call_filter_check_discard(call, entry, buffer, event))
3162 		__buffer_unlock_commit(buffer, event);
3163 
3164  out_drop_count:
3165 	__this_cpu_dec(user_stack_count);
3166  out:
3167 	preempt_enable();
3168 }
3169 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3170 static void ftrace_trace_userstack(struct trace_array *tr,
3171 				   struct trace_buffer *buffer,
3172 				   unsigned int trace_ctx)
3173 {
3174 }
3175 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3176 
3177 #endif /* CONFIG_STACKTRACE */
3178 
3179 static inline void
3180 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3181 			  unsigned long long delta)
3182 {
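	/* Split the 64-bit delta into the entry's 32-bit bottom/top halves */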
3183 	entry->bottom_delta_ts = delta & U32_MAX;
3184 	entry->top_delta_ts = (delta >> 32);
3185 }
3186 
3187 void trace_last_func_repeats(struct trace_array *tr,
3188 			     struct trace_func_repeats *last_info,
3189 			     unsigned int trace_ctx)
3190 {
3191 	struct trace_buffer *buffer = tr->array_buffer.buffer;
3192 	struct func_repeats_entry *entry;
3193 	struct ring_buffer_event *event;
3194 	u64 delta;
3195 
3196 	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3197 					    sizeof(*entry), trace_ctx);
3198 	if (!event)
3199 		return;
3200 
3201 	delta = ring_buffer_event_time_stamp(buffer, event) -
3202 		last_info->ts_last_call;
3203 
3204 	entry = ring_buffer_event_data(event);
3205 	entry->ip = last_info->ip;
3206 	entry->parent_ip = last_info->parent_ip;
3207 	entry->count = last_info->count;
3208 	func_repeats_set_delta_ts(entry, delta);
3209 
3210 	__buffer_unlock_commit(buffer, event);
3211 }
3212 
3213 /* created for use with alloc_percpu */
3214 struct trace_buffer_struct {
3215 	int nesting;
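	/* one buffer per nesting level, presumably normal, softirq, irq and NMI context (matches the limit in get_trace_buf()) */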
3216 	char buffer[4][TRACE_BUF_SIZE];
3217 };
3218 
3219 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3220 
3221 /*
3222  * This allows for lockless recording.  If we're nested too deeply, then
3223  * this returns NULL.
3224  */
3225 static char *get_trace_buf(void)
3226 {
3227 	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3228 
3229 	if (!trace_percpu_buffer || buffer->nesting >= 4)
3230 		return NULL;
3231 
3232 	buffer->nesting++;
3233 
3234 	/* Interrupts must see nesting incremented before we use the buffer */
3235 	barrier();
3236 	return &buffer->buffer[buffer->nesting - 1][0];
3237 }
3238 
3239 static void put_trace_buf(void)
3240 {
3241 	/* Don't let the decrement of nesting leak before this */
3242 	barrier();
3243 	this_cpu_dec(trace_percpu_buffer->nesting);
3244 }
3245 
3246 static int alloc_percpu_trace_buffer(void)
3247 {
3248 	struct trace_buffer_struct __percpu *buffers;
3249 
3250 	if (trace_percpu_buffer)
3251 		return 0;
3252 
3253 	buffers = alloc_percpu(struct trace_buffer_struct);
3254 	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3255 		return -ENOMEM;
3256 
3257 	trace_percpu_buffer = buffers;
3258 	return 0;
3259 }
3260 
3261 static int buffers_allocated;
3262 
3263 void trace_printk_init_buffers(void)
3264 {
3265 	if (buffers_allocated)
3266 		return;
3267 
3268 	if (alloc_percpu_trace_buffer())
3269 		return;
3270 
3271 	/* trace_printk() is for debug use only. Don't use it in production. */
3272 
3273 	pr_warn("\n");
3274 	pr_warn("**********************************************************\n");
3275 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3276 	pr_warn("**                                                      **\n");
3277 	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3278 	pr_warn("**                                                      **\n");
3279 	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3280 	pr_warn("** unsafe for production use.                           **\n");
3281 	pr_warn("**                                                      **\n");
3282 	pr_warn("** If you see this message and you are not debugging    **\n");
3283 	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3284 	pr_warn("**                                                      **\n");
3285 	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3286 	pr_warn("**********************************************************\n");
3287 
3288 	/* Expand the buffers to set size */
3289 	tracing_update_buffers();
3290 
3291 	buffers_allocated = 1;
3292 
3293 	/*
3294 	 * trace_printk_init_buffers() can be called by modules.
3295 	 * If that happens, then we need to start cmdline recording
3296 	 * directly here. If the global_trace.buffer is already
3297 	 * allocated here, then this was called by module code.
3298 	 */
3299 	if (global_trace.array_buffer.buffer)
3300 		tracing_start_cmdline_record();
3301 }
3302 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3303 
3304 void trace_printk_start_comm(void)
3305 {
3306 	/* Start tracing comms if trace printk is set */
3307 	if (!buffers_allocated)
3308 		return;
3309 	tracing_start_cmdline_record();
3310 }
3311 
3312 static void trace_printk_start_stop_comm(int enabled)
3313 {
3314 	if (!buffers_allocated)
3315 		return;
3316 
3317 	if (enabled)
3318 		tracing_start_cmdline_record();
3319 	else
3320 		tracing_stop_cmdline_record();
3321 }
3322 
3323 /**
3324  * trace_vbprintk - write binary msg to tracing buffer
3325  * @ip:    The address of the caller
3326  * @fmt:   The string format to write to the buffer
3327  * @args:  Arguments for @fmt
3328  */
3329 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3330 {
3331 	struct trace_event_call *call = &event_bprint;
3332 	struct ring_buffer_event *event;
3333 	struct trace_buffer *buffer;
3334 	struct trace_array *tr = &global_trace;
3335 	struct bprint_entry *entry;
3336 	unsigned int trace_ctx;
3337 	char *tbuffer;
3338 	int len = 0, size;
3339 
3340 	if (unlikely(tracing_selftest_running || tracing_disabled))
3341 		return 0;
3342 
3343 	/* Don't pollute graph traces with trace_vprintk internals */
3344 	pause_graph_tracing();
3345 
3346 	trace_ctx = tracing_gen_ctx();
3347 	preempt_disable_notrace();
3348 
3349 	tbuffer = get_trace_buf();
3350 	if (!tbuffer) {
3351 		len = 0;
3352 		goto out_nobuffer;
3353 	}
3354 
3355 	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3356 
3357 	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3358 		goto out_put;
3359 
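	/* len is the number of u32 words vbin_printf() wrote into tbuffer */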
3360 	size = sizeof(*entry) + sizeof(u32) * len;
3361 	buffer = tr->array_buffer.buffer;
3362 	ring_buffer_nest_start(buffer);
3363 	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3364 					    trace_ctx);
3365 	if (!event)
3366 		goto out;
3367 	entry = ring_buffer_event_data(event);
3368 	entry->ip			= ip;
3369 	entry->fmt			= fmt;
3370 
3371 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3372 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3373 		__buffer_unlock_commit(buffer, event);
3374 		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3375 	}
3376 
3377 out:
3378 	ring_buffer_nest_end(buffer);
3379 out_put:
3380 	put_trace_buf();
3381 
3382 out_nobuffer:
3383 	preempt_enable_notrace();
3384 	unpause_graph_tracing();
3385 
3386 	return len;
3387 }
3388 EXPORT_SYMBOL_GPL(trace_vbprintk);
3389 
3390 __printf(3, 0)
3391 static int
3392 __trace_array_vprintk(struct trace_buffer *buffer,
3393 		      unsigned long ip, const char *fmt, va_list args)
3394 {
3395 	struct trace_event_call *call = &event_print;
3396 	struct ring_buffer_event *event;
3397 	int len = 0, size;
3398 	struct print_entry *entry;
3399 	unsigned int trace_ctx;
3400 	char *tbuffer;
3401 
3402 	if (tracing_disabled || tracing_selftest_running)
3403 		return 0;
3404 
3405 	/* Don't pollute graph traces with trace_vprintk internals */
3406 	pause_graph_tracing();
3407 
3408 	trace_ctx = tracing_gen_ctx();
3409 	preempt_disable_notrace();
3410 
3411 
3412 	tbuffer = get_trace_buf();
3413 	if (!tbuffer) {
3414 		len = 0;
3415 		goto out_nobuffer;
3416 	}
3417 
3418 	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3419 
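	/* +1 for the terminating NUL from vscnprintf(), copied below */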
3420 	size = sizeof(*entry) + len + 1;
3421 	ring_buffer_nest_start(buffer);
3422 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3423 					    trace_ctx);
3424 	if (!event)
3425 		goto out;
3426 	entry = ring_buffer_event_data(event);
3427 	entry->ip = ip;
3428 
3429 	memcpy(&entry->buf, tbuffer, len + 1);
3430 	if (!call_filter_check_discard(call, entry, buffer, event)) {
3431 		__buffer_unlock_commit(buffer, event);
3432 		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3433 	}
3434 
3435 out:
3436 	ring_buffer_nest_end(buffer);
3437 	put_trace_buf();
3438 
3439 out_nobuffer:
3440 	preempt_enable_notrace();
3441 	unpause_graph_tracing();
3442 
3443 	return len;
3444 }
3445 
3446 __printf(3, 0)
3447 int trace_array_vprintk(struct trace_array *tr,
3448 			unsigned long ip, const char *fmt, va_list args)
3449 {
3450 	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3451 }
3452 
3453 /**
3454  * trace_array_printk - Print a message to a specific instance
3455  * @tr: The instance trace_array descriptor
3456  * @ip: The instruction pointer that this is called from.
3457  * @fmt: The format to print (printf format)
3458  *
3459  * If a subsystem sets up its own instance, it has the right to
3460  * printk strings into its tracing instance buffer using this
3461  * function. Note, this function will not write into the top level
3462  * buffer (use trace_printk() for that), as the top level buffer
3463  * should only hold events that can be individually disabled.
3464  * trace_printk() is only for debugging a kernel and should never
3465  * be incorporated into normal use.
3466  *
3467  * trace_array_printk() can be used, as it will not add noise to the
3468  * top level tracing buffer.
3469  *
3470  * Note, trace_array_init_printk() must be called on @tr before this
3471  * can be used.
3472  */
3473 __printf(3, 0)
3474 int trace_array_printk(struct trace_array *tr,
3475 		       unsigned long ip, const char *fmt, ...)
3476 {
3477 	int ret;
3478 	va_list ap;
3479 
3480 	if (!tr)
3481 		return -ENOENT;
3482 
3483 	/* This is only allowed for created instances */
3484 	if (tr == &global_trace)
3485 		return 0;
3486 
3487 	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3488 		return 0;
3489 
3490 	va_start(ap, fmt);
3491 	ret = trace_array_vprintk(tr, ip, fmt, ap);
3492 	va_end(ap);
3493 	return ret;
3494 }
3495 EXPORT_SYMBOL_GPL(trace_array_printk);
3496 
3497 /**
3498  * trace_array_init_printk - Initialize buffers for trace_array_printk()
3499  * @tr: The trace array to initialize the buffers for
3500  *
3501  * As trace_array_printk() only writes into instances, they are OK to
3502  * have in the kernel (unlike trace_printk()). This needs to be called
3503  * before trace_array_printk() can be used on a trace_array.
3504  */
3505 int trace_array_init_printk(struct trace_array *tr)
3506 {
3507 	if (!tr)
3508 		return -ENOENT;
3509 
3510 	/* This is only allowed for created instances */
3511 	if (tr == &global_trace)
3512 		return -EINVAL;
3513 
3514 	return alloc_percpu_trace_buffer();
3515 }
3516 EXPORT_SYMBOL_GPL(trace_array_init_printk);
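
/*
 * Example (a sketch, not part of this file): a subsystem that owns its own
 * trace instance would typically pair the two calls roughly as below,
 * assuming it obtained the instance with trace_array_get_by_name() and that
 * "my_subsys" is a hypothetical instance name:
 *
 *	struct trace_array *tr = trace_array_get_by_name("my_subsys");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "state: %d\n", state);
 */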
3517 
3518 __printf(3, 4)
3519 int trace_array_printk_buf(struct trace_buffer *buffer,
3520 			   unsigned long ip, const char *fmt, ...)
3521 {
3522 	int ret;
3523 	va_list ap;
3524 
3525 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3526 		return 0;
3527 
3528 	va_start(ap, fmt);
3529 	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3530 	va_end(ap);
3531 	return ret;
3532 }
3533 
3534 __printf(2, 0)
3535 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3536 {
3537 	return trace_array_vprintk(&global_trace, ip, fmt, args);
3538 }
3539 EXPORT_SYMBOL_GPL(trace_vprintk);
3540 
3541 static void trace_iterator_increment(struct trace_iterator *iter)
3542 {
3543 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3544 
3545 	iter->idx++;
3546 	if (buf_iter)
3547 		ring_buffer_iter_advance(buf_iter);
3548 }
3549 
3550 static struct trace_entry *
3551 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3552 		unsigned long *lost_events)
3553 {
3554 	struct ring_buffer_event *event;
3555 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3556 
3557 	if (buf_iter) {
3558 		event = ring_buffer_iter_peek(buf_iter, ts);
3559 		if (lost_events)
3560 			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
3561 				(unsigned long)-1 : 0;
3562 	} else {
3563 		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3564 					 lost_events);
3565 	}
3566 
3567 	if (event) {
3568 		iter->ent_size = ring_buffer_event_length(event);
3569 		return ring_buffer_event_data(event);
3570 	}
3571 	iter->ent_size = 0;
3572 	return NULL;
3573 }
3574 
3575 static struct trace_entry *
3576 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3577 		  unsigned long *missing_events, u64 *ent_ts)
3578 {
3579 	struct trace_buffer *buffer = iter->array_buffer->buffer;
3580 	struct trace_entry *ent, *next = NULL;
3581 	unsigned long lost_events = 0, next_lost = 0;
3582 	int cpu_file = iter->cpu_file;
3583 	u64 next_ts = 0, ts;
3584 	int next_cpu = -1;
3585 	int next_size = 0;
3586 	int cpu;
3587 
3588 	/*
3589 	 * If we are in a per_cpu trace file, don't bother iterating over
3590 	 * all CPUs; just peek at that CPU directly.
3591 	 */
3592 	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3593 		if (ring_buffer_empty_cpu(buffer, cpu_file))
3594 			return NULL;
3595 		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3596 		if (ent_cpu)
3597 			*ent_cpu = cpu_file;
3598 
3599 		return ent;
3600 	}
3601 
3602 	for_each_tracing_cpu(cpu) {
3603 
3604 		if (ring_buffer_empty_cpu(buffer, cpu))
3605 			continue;
3606 
3607 		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3608 
3609 		/*
3610 		 * Pick the entry with the smallest timestamp:
3611 		 */
3612 		if (ent && (!next || ts < next_ts)) {
3613 			next = ent;
3614 			next_cpu = cpu;
3615 			next_ts = ts;
3616 			next_lost = lost_events;
3617 			next_size = iter->ent_size;
3618 		}
3619 	}
3620 
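	/* Restore the size of the chosen entry; peek_next_entry() for later CPUs overwrote iter->ent_size */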
3621 	iter->ent_size = next_size;
3622 
3623 	if (ent_cpu)
3624 		*ent_cpu = next_cpu;
3625 
3626 	if (ent_ts)
3627 		*ent_ts = next_ts;
3628 
3629 	if (missing_events)
3630 		*missing_events = next_lost;
3631 
3632 	return next;
3633 }
3634 
3635 #define STATIC_FMT_BUF_SIZE	128
3636 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3637 
3638 static char *trace_iter_expand_format(struct trace_iterator *iter)
3639 {
3640 	char *tmp;
3641 
3642 	/*
3643 	 * iter->tr is NULL when used with tp_printk, which makes
3644 	 * this get called where it is not safe to call krealloc().
3645 	 */
3646 	if (!iter->tr || iter->fmt == static_fmt_buf)
3647 		return NULL;
3648 
3649 	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3650 		       GFP_KERNEL);
3651 	if (tmp) {
3652 		iter->fmt_size += STATIC_FMT_BUF_SIZE;
3653 		iter->fmt = tmp;
3654 	}
3655 
3656 	return tmp;
3657 }
3658 
3659 /* Returns true if the string is safe to dereference from an event */
3660 static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3661 {
3662 	unsigned long addr = (unsigned long)str;
3663 	struct trace_event *trace_event;
3664 	struct trace_event_call *event;
3665 
3666 	/* OK if part of the event data */
3667 	if ((addr >= (unsigned long)iter->ent) &&
3668 	    (addr < (unsigned long)iter->ent + iter->ent_size))
3669 		return true;
3670 
3671 	/* OK if part of the temp seq buffer */
3672 	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3673 	    (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3674 		return true;
3675 
3676 	/* Core rodata can not be freed */
3677 	if (is_kernel_rodata(addr))
3678 		return true;
3679 
3680 	if (trace_is_tracepoint_string(str))
3681 		return true;
3682 
3683 	/*
3684 	 * Now this could be a module event, referencing core module
3685 	 * data, which is OK.
3686 	 */
3687 	if (!iter->ent)
3688 		return false;
3689 
3690 	trace_event = ftrace_find_event(iter->ent->type);
3691 	if (!trace_event)
3692 		return false;
3693 
3694 	event = container_of(trace_event, struct trace_event_call, event);
3695 	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3696 		return false;
3697 
3698 	/* Would rather have rodata, but this will suffice */
3699 	if (within_module_core(addr, event->module))
3700 		return true;
3701 
3702 	return false;
3703 }
3704 
3705 static const char *show_buffer(struct trace_seq *s)
3706 {
3707 	struct seq_buf *seq = &s->seq;
3708 
3709 	seq_buf_terminate(seq);
3710 
3711 	return seq->buffer;
3712 }
3713 
3714 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3715 
3716 static int test_can_verify_check(const char *fmt, ...)
3717 {
3718 	char buf[16];
3719 	va_list ap;
3720 	int ret;
3721 
3722 	/*
3723 	 * The verifier depends on vsnprintf() modifying the va_list passed
3724 	 * to it, which is the case when the va_list is passed by reference.
3725 	 * Some architectures (like x86_32) pass it by value, which means
3726 	 * that vsnprintf() does not modify the caller's va_list, and the
3727 	 * verifier would then need to understand all the values that
3728 	 * vsnprintf can consume. If it is passed by value, the verifier
3729 	 * is disabled.
3730 	 */
3731 	va_start(ap, fmt);
3732 	vsnprintf(buf, 16, "%d", ap);
3733 	ret = va_arg(ap, int);
3734 	va_end(ap);
3735 
3736 	return ret;
3737 }
3738 
3739 static void test_can_verify(void)
3740 {
3741 	if (!test_can_verify_check("%d %d", 0, 1)) {
3742 		pr_info("trace event string verifier disabled\n");
3743 		static_branch_inc(&trace_no_verify);
3744 	}
3745 }
3746 
3747 /**
3748  * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3749  * @iter: The iterator that holds the seq buffer and the event being printed
3750  * @fmt: The format used to print the event
3751  * @ap: The va_list holding the data to print from @fmt.
3752  *
3753  * This writes the data into the @iter->seq buffer using the data from
3754  * @fmt and @ap. If the format has a %s, then the source of the string
3755  * is examined to make sure it is safe to print, otherwise it will
3756  * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3757  * pointer.
3758  */
3759 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3760 			 va_list ap)
3761 {
3762 	const char *p = fmt;
3763 	const char *str;
3764 	int i, j;
3765 
3766 	if (WARN_ON_ONCE(!fmt))
3767 		return;
3768 
3769 	if (static_branch_unlikely(&trace_no_verify))
3770 		goto print;
3771 
3772 	/* Don't bother checking when doing a ftrace_dump() */
3773 	if (iter->fmt == static_fmt_buf)
3774 		goto print;
3775 
3776 	while (*p) {
3777 		bool star = false;
3778 		int len = 0;
3779 
3780 		j = 0;
3781 
3782 		/* We only care about %s and variants */
3783 		for (i = 0; p[i]; i++) {
3784 			if (i + 1 >= iter->fmt_size) {
3785 				/*
3786 				 * If we can't expand the copy buffer,
3787 				 * just print it.
3788 				 */
3789 				if (!trace_iter_expand_format(iter))
3790 					goto print;
3791 			}
3792 
3793 			if (p[i] == '\\' && p[i+1]) {
3794 				i++;
3795 				continue;
3796 			}
3797 			if (p[i] == '%') {
3798 				/* Need to test cases like %08.*s */
3799 				for (j = 1; p[i+j]; j++) {
3800 					if (isdigit(p[i+j]) ||
3801 					    p[i+j] == '.')
3802 						continue;
3803 					if (p[i+j] == '*') {
3804 						star = true;
3805 						continue;
3806 					}
3807 					break;
3808 				}
3809 				if (p[i+j] == 's')
3810 					break;
3811 				star = false;
3812 			}
3813 			j = 0;
3814 		}
3815 		/* If no %s found then just print normally */
3816 		if (!p[i])
3817 			break;
3818 
3819 		/* Copy up to the %s, and print that */
3820 		strncpy(iter->fmt, p, i);
3821 		iter->fmt[i] = '\0';
3822 		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3823 
3824 		/*
3825 		 * If iter->seq is full, the above call no longer guarantees
3826 		 * that ap is in sync with fmt processing, and further calls
3827 		 * to va_arg() can return wrong positional arguments.
3828 		 *
3829 		 * Ensure that ap is no longer used in this case.
3830 		 */
3831 		if (iter->seq.full) {
3832 			p = "";
3833 			break;
3834 		}
3835 
3836 		if (star)
3837 			len = va_arg(ap, int);
3838 
3839 		/* The ap now points to the string data of the %s */
3840 		str = va_arg(ap, const char *);
3841 
3842 		/*
3843 		 * If you hit this warning, it is likely that the
3844 		 * trace event in question used %s on a string that
3845 		 * was saved at the time of the event, but may not be
3846 		 * around when the trace is read. Use __string(),
3847 		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3848 		 * instead. See samples/trace_events/trace-events-sample.h
3849 		 * for reference.
3850 		 */
3851 		if (WARN_ONCE(!trace_safe_str(iter, str),
3852 			      "fmt: '%s' current_buffer: '%s'",
3853 			      fmt, show_buffer(&iter->seq))) {
3854 			int ret;
3855 
3856 			/* Try to safely read the string */
3857 			if (star) {
3858 				if (len + 1 > iter->fmt_size)
3859 					len = iter->fmt_size - 1;
3860 				if (len < 0)
3861 					len = 0;
3862 				ret = copy_from_kernel_nofault(iter->fmt, str, len);
3863 				iter->fmt[len] = 0;
3864 				star = false;
3865 			} else {
3866 				ret = strncpy_from_kernel_nofault(iter->fmt, str,
3867 								  iter->fmt_size);
3868 			}
3869 			if (ret < 0)
3870 				trace_seq_printf(&iter->seq, "(0x%px)", str);
3871 			else
3872 				trace_seq_printf(&iter->seq, "(0x%px:%s)",
3873 						 str, iter->fmt);
3874 			str = "[UNSAFE-MEMORY]";
3875 			strcpy(iter->fmt, "%s");
3876 		} else {
3877 			strncpy(iter->fmt, p + i, j + 1);
3878 			iter->fmt[j+1] = '\0';
3879 		}
3880 		if (star)
3881 			trace_seq_printf(&iter->seq, iter->fmt, len, str);
3882 		else
3883 			trace_seq_printf(&iter->seq, iter->fmt, str);
3884 
3885 		p += i + j + 1;
3886 	}
3887  print:
3888 	if (*p)
3889 		trace_seq_vprintf(&iter->seq, p, ap);
3890 }
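/*
 * Example of the resulting output (illustrative): for an event format
 * such as "comm=%s", where the string pointer fails trace_safe_str(),
 * the rendered line contains something like
 *
 *	comm=(0xffff888123456789:stale-data)[UNSAFE-MEMORY]
 *
 * i.e. the raw pointer, a best-effort copy of whatever it points at
 * (or just the pointer when the copy faults), and the [UNSAFE-MEMORY]
 * marker substituted for the original %s argument. The pointer value
 * and copied text shown here are made up for illustration.
 */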
3891 
3892 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3893 {
3894 	const char *p, *new_fmt;
3895 	char *q;
3896 
3897 	if (WARN_ON_ONCE(!fmt))
3898 		return fmt;
3899 
3900 	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3901 		return fmt;
3902 
3903 	p = fmt;
3904 	new_fmt = q = iter->fmt;
3905 	while (*p) {
3906 		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3907 			if (!trace_iter_expand_format(iter))
3908 				return fmt;
3909 
3910 			q += iter->fmt - new_fmt;
3911 			new_fmt = iter->fmt;
3912 		}
3913 
3914 		*q++ = *p++;
3915 
3916 		/* Replace %p with %px */
3917 		if (p[-1] == '%') {
3918 			if (p[0] == '%') {
3919 				*q++ = *p++;
3920 			} else if (p[0] == 'p' && !isalnum(p[1])) {
3921 				*q++ = *p++;
3922 				*q++ = 'x';
3923 			}
3924 		}
3925 	}
3926 	*q = '\0';
3927 
3928 	return new_fmt;
3929 }
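/*
 * Example of the rewrite above (illustrative format string): with
 * TRACE_ITER_HASH_PTR cleared in tr->trace_flags, a format like
 *
 *	"dev=%s addr=%p count=%d"
 *
 * is copied into iter->fmt as
 *
 *	"dev=%s addr=%px count=%d"
 *
 * Only a bare %p (not followed by an alphanumeric, so %pK, %ps and
 * friends are left alone) is expanded, and "%%" is copied through
 * untouched.
 */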
3930 
3931 #define STATIC_TEMP_BUF_SIZE	128
3932 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3933 
3934 /* Find the next real entry, without updating the iterator itself */
3935 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3936 					  int *ent_cpu, u64 *ent_ts)
3937 {
3938 	/* __find_next_entry will reset ent_size */
3939 	int ent_size = iter->ent_size;
3940 	struct trace_entry *entry;
3941 
3942 	/*
3943 	 * If called from ftrace_dump(), then the iter->temp buffer
3944 	 * will be the static_temp_buf and not created from kmalloc.
3945 	 * If the entry size is greater than the buffer, we can
3946 	 * not save it. Just return NULL in that case. This is only
3947 	 * used to add markers when two consecutive events' time
3948 	 * stamps have a large delta. See trace_print_lat_context()
3949 	 */
3950 	if (iter->temp == static_temp_buf &&
3951 	    STATIC_TEMP_BUF_SIZE < ent_size)
3952 		return NULL;
3953 
3954 	/*
3955 	 * The __find_next_entry() may call peek_next_entry(), which may
3956 	 * call ring_buffer_peek() that may make the contents of iter->ent
3957 	 * undefined. Need to copy iter->ent now.
3958 	 */
3959 	if (iter->ent && iter->ent != iter->temp) {
3960 		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3961 		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3962 			void *temp;
3963 			temp = kmalloc(iter->ent_size, GFP_KERNEL);
3964 			if (!temp)
3965 				return NULL;
3966 			kfree(iter->temp);
3967 			iter->temp = temp;
3968 			iter->temp_size = iter->ent_size;
3969 		}
3970 		memcpy(iter->temp, iter->ent, iter->ent_size);
3971 		iter->ent = iter->temp;
3972 	}
3973 	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3974 	/* Put back the original ent_size */
3975 	iter->ent_size = ent_size;
3976 
3977 	return entry;
3978 }
3979 
3980 /* Find the next real entry, and increment the iterator to the next entry */
3981 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3982 {
3983 	iter->ent = __find_next_entry(iter, &iter->cpu,
3984 				      &iter->lost_events, &iter->ts);
3985 
3986 	if (iter->ent)
3987 		trace_iterator_increment(iter);
3988 
3989 	return iter->ent ? iter : NULL;
3990 }
3991 
3992 static void trace_consume(struct trace_iterator *iter)
3993 {
3994 	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3995 			    &iter->lost_events);
3996 }
3997 
3998 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3999 {
4000 	struct trace_iterator *iter = m->private;
4001 	int i = (int)*pos;
4002 	void *ent;
4003 
4004 	WARN_ON_ONCE(iter->leftover);
4005 
4006 	(*pos)++;
4007 
4008 	/* can't go backwards */
4009 	if (iter->idx > i)
4010 		return NULL;
4011 
4012 	if (iter->idx < 0)
4013 		ent = trace_find_next_entry_inc(iter);
4014 	else
4015 		ent = iter;
4016 
4017 	while (ent && iter->idx < i)
4018 		ent = trace_find_next_entry_inc(iter);
4019 
4020 	iter->pos = *pos;
4021 
4022 	return ent;
4023 }
4024 
4025 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4026 {
4027 	struct ring_buffer_iter *buf_iter;
4028 	unsigned long entries = 0;
4029 	u64 ts;
4030 
4031 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4032 
4033 	buf_iter = trace_buffer_iter(iter, cpu);
4034 	if (!buf_iter)
4035 		return;
4036 
4037 	ring_buffer_iter_reset(buf_iter);
4038 
4039 	/*
4040 	 * With the max latency tracers, we could have the case where
4041 	 * a reset never took place on a cpu. This is evidenced by the
4042 	 * timestamp being before the start of the buffer.
4043 	 */
4044 	while (ring_buffer_iter_peek(buf_iter, &ts)) {
4045 		if (ts >= iter->array_buffer->time_start)
4046 			break;
4047 		entries++;
4048 		ring_buffer_iter_advance(buf_iter);
4049 	}
4050 
4051 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4052 }
4053 
4054 /*
4055  * The current tracer is copied to avoid taking a global lock
4056  * all around.
4057  */
4058 static void *s_start(struct seq_file *m, loff_t *pos)
4059 {
4060 	struct trace_iterator *iter = m->private;
4061 	struct trace_array *tr = iter->tr;
4062 	int cpu_file = iter->cpu_file;
4063 	void *p = NULL;
4064 	loff_t l = 0;
4065 	int cpu;
4066 
4067 	/*
4068 	 * copy the tracer to avoid using a global lock all around.
4069 	 * iter->trace is a copy of current_trace; the name pointer can
4070 	 * be compared instead of doing a strcmp(), as iter->trace->name
4071 	 * will point to the same string as current_trace->name.
4072 	 */
4073 	mutex_lock(&trace_types_lock);
4074 	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4075 		*iter->trace = *tr->current_trace;
4076 	mutex_unlock(&trace_types_lock);
4077 
4078 #ifdef CONFIG_TRACER_MAX_TRACE
4079 	if (iter->snapshot && iter->trace->use_max_tr)
4080 		return ERR_PTR(-EBUSY);
4081 #endif
4082 
4083 	if (*pos != iter->pos) {
4084 		iter->ent = NULL;
4085 		iter->cpu = 0;
4086 		iter->idx = -1;
4087 
4088 		if (cpu_file == RING_BUFFER_ALL_CPUS) {
4089 			for_each_tracing_cpu(cpu)
4090 				tracing_iter_reset(iter, cpu);
4091 		} else
4092 			tracing_iter_reset(iter, cpu_file);
4093 
4094 		iter->leftover = 0;
4095 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4096 			;
4097 
4098 	} else {
4099 		/*
4100 		 * If we overflowed the seq_file before, then we want
4101 		 * to just reuse the trace_seq buffer again.
4102 		 */
4103 		if (iter->leftover)
4104 			p = iter;
4105 		else {
4106 			l = *pos - 1;
4107 			p = s_next(m, p, &l);
4108 		}
4109 	}
4110 
4111 	trace_event_read_lock();
4112 	trace_access_lock(cpu_file);
4113 	return p;
4114 }
4115 
4116 static void s_stop(struct seq_file *m, void *p)
4117 {
4118 	struct trace_iterator *iter = m->private;
4119 
4120 #ifdef CONFIG_TRACER_MAX_TRACE
4121 	if (iter->snapshot && iter->trace->use_max_tr)
4122 		return;
4123 #endif
4124 
4125 	trace_access_unlock(iter->cpu_file);
4126 	trace_event_read_unlock();
4127 }
4128 
4129 static void
4130 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4131 		      unsigned long *entries, int cpu)
4132 {
4133 	unsigned long count;
4134 
4135 	count = ring_buffer_entries_cpu(buf->buffer, cpu);
4136 	/*
4137 	 * If this buffer has skipped entries, then we hold all
4138 	 * entries for the trace and we need to ignore the
4139 	 * ones before the time stamp.
4140 	 */
4141 	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4142 		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4143 		/* total is the same as the entries */
4144 		*total = count;
4145 	} else
4146 		*total = count +
4147 			ring_buffer_overrun_cpu(buf->buffer, cpu);
4148 	*entries = count;
4149 }
4150 
4151 static void
4152 get_total_entries(struct array_buffer *buf,
4153 		  unsigned long *total, unsigned long *entries)
4154 {
4155 	unsigned long t, e;
4156 	int cpu;
4157 
4158 	*total = 0;
4159 	*entries = 0;
4160 
4161 	for_each_tracing_cpu(cpu) {
4162 		get_total_entries_cpu(buf, &t, &e, cpu);
4163 		*total += t;
4164 		*entries += e;
4165 	}
4166 }
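/*
 * A small numeric example of the two counters: if a CPU buffer still
 * holds 100 events and 25 older events were overwritten (the per-cpu
 * ring buffer overrun count), then *entries is 100 and *total is 125.
 * When a CPU recorded skipped_entries (see tracing_iter_reset()), those
 * are subtracted from the in-buffer count and the overrun is not added.
 */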
4167 
4168 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4169 {
4170 	unsigned long total, entries;
4171 
4172 	if (!tr)
4173 		tr = &global_trace;
4174 
4175 	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4176 
4177 	return entries;
4178 }
4179 
4180 unsigned long trace_total_entries(struct trace_array *tr)
4181 {
4182 	unsigned long total, entries;
4183 
4184 	if (!tr)
4185 		tr = &global_trace;
4186 
4187 	get_total_entries(&tr->array_buffer, &total, &entries);
4188 
4189 	return entries;
4190 }
4191 
4192 static void print_lat_help_header(struct seq_file *m)
4193 {
4194 	seq_puts(m, "#                    _------=> CPU#            \n"
4195 		    "#                   / _-----=> irqs-off/BH-disabled\n"
4196 		    "#                  | / _----=> need-resched    \n"
4197 		    "#                  || / _---=> hardirq/softirq \n"
4198 		    "#                  ||| / _--=> preempt-depth   \n"
4199 		    "#                  |||| / _-=> migrate-disable \n"
4200 		    "#                  ||||| /     delay           \n"
4201 		    "#  cmd     pid     |||||| time  |   caller     \n"
4202 		    "#     \\   /        ||||||  \\    |    /       \n");
4203 }
4204 
4205 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4206 {
4207 	unsigned long total;
4208 	unsigned long entries;
4209 
4210 	get_total_entries(buf, &total, &entries);
4211 	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
4212 		   entries, total, num_online_cpus());
4213 	seq_puts(m, "#\n");
4214 }
4215 
4216 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4217 				   unsigned int flags)
4218 {
4219 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4220 
4221 	print_event_info(buf, m);
4222 
4223 	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
4224 	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
4225 }
4226 
4227 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4228 				       unsigned int flags)
4229 {
4230 	bool tgid = flags & TRACE_ITER_RECORD_TGID;
4231 	const char *space = "            ";
4232 	int prec = tgid ? 12 : 2;
4233 
4234 	print_event_info(buf, m);
4235 
4236 	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
4237 	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
4238 	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
4239 	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
4240 	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
4241 	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
4242 	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
4243 	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
4244 }
4245 
4246 void
4247 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4248 {
4249 	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4250 	struct array_buffer *buf = iter->array_buffer;
4251 	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4252 	struct tracer *type = iter->trace;
4253 	unsigned long entries;
4254 	unsigned long total;
4255 	const char *name = type->name;
4256 
4258 
4259 	get_total_entries(buf, &total, &entries);
4260 
4261 	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4262 		   name, UTS_RELEASE);
4263 	seq_puts(m, "# -----------------------------------"
4264 		 "---------------------------------\n");
4265 	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4266 		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4267 		   nsecs_to_usecs(data->saved_latency),
4268 		   entries,
4269 		   total,
4270 		   buf->cpu,
4271 #if defined(CONFIG_PREEMPT_NONE)
4272 		   "server",
4273 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
4274 		   "desktop",
4275 #elif defined(CONFIG_PREEMPT)
4276 		   "preempt",
4277 #elif defined(CONFIG_PREEMPT_RT)
4278 		   "preempt_rt",
4279 #else
4280 		   "unknown",
4281 #endif
4282 		   /* These are reserved for later use */
4283 		   0, 0, 0, 0);
4284 #ifdef CONFIG_SMP
4285 	seq_printf(m, " #P:%d)\n", num_online_cpus());
4286 #else
4287 	seq_puts(m, ")\n");
4288 #endif
4289 	seq_puts(m, "#    -----------------\n");
4290 	seq_printf(m, "#    | task: %.16s-%d "
4291 		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4292 		   data->comm, data->pid,
4293 		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4294 		   data->policy, data->rt_priority);
4295 	seq_puts(m, "#    -----------------\n");
4296 
4297 	if (data->critical_start) {
4298 		seq_puts(m, "#  => started at: ");
4299 		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4300 		trace_print_seq(m, &iter->seq);
4301 		seq_puts(m, "\n#  => ended at:   ");
4302 		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4303 		trace_print_seq(m, &iter->seq);
4304 		seq_puts(m, "\n#\n");
4305 	}
4306 
4307 	seq_puts(m, "#\n");
4308 }
4309 
4310 static void test_cpu_buff_start(struct trace_iterator *iter)
4311 {
4312 	struct trace_seq *s = &iter->seq;
4313 	struct trace_array *tr = iter->tr;
4314 
4315 	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4316 		return;
4317 
4318 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4319 		return;
4320 
4321 	if (cpumask_available(iter->started) &&
4322 	    cpumask_test_cpu(iter->cpu, iter->started))
4323 		return;
4324 
4325 	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4326 		return;
4327 
4328 	if (cpumask_available(iter->started))
4329 		cpumask_set_cpu(iter->cpu, iter->started);
4330 
4331 	/* Don't print started cpu buffer for the first entry of the trace */
4332 	if (iter->idx > 1)
4333 		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4334 				iter->cpu);
4335 }
4336 
4337 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4338 {
4339 	struct trace_array *tr = iter->tr;
4340 	struct trace_seq *s = &iter->seq;
4341 	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4342 	struct trace_entry *entry;
4343 	struct trace_event *event;
4344 
4345 	entry = iter->ent;
4346 
4347 	test_cpu_buff_start(iter);
4348 
4349 	event = ftrace_find_event(entry->type);
4350 
4351 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4352 		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4353 			trace_print_lat_context(iter);
4354 		else
4355 			trace_print_context(iter);
4356 	}
4357 
4358 	if (trace_seq_has_overflowed(s))
4359 		return TRACE_TYPE_PARTIAL_LINE;
4360 
4361 	if (event)
4362 		return event->funcs->trace(iter, sym_flags, event);
4363 
4364 	trace_seq_printf(s, "Unknown type %d\n", entry->type);
4365 
4366 	return trace_handle_return(s);
4367 }
4368 
4369 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4370 {
4371 	struct trace_array *tr = iter->tr;
4372 	struct trace_seq *s = &iter->seq;
4373 	struct trace_entry *entry;
4374 	struct trace_event *event;
4375 
4376 	entry = iter->ent;
4377 
4378 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4379 		trace_seq_printf(s, "%d %d %llu ",
4380 				 entry->pid, iter->cpu, iter->ts);
4381 
4382 	if (trace_seq_has_overflowed(s))
4383 		return TRACE_TYPE_PARTIAL_LINE;
4384 
4385 	event = ftrace_find_event(entry->type);
4386 	if (event)
4387 		return event->funcs->raw(iter, 0, event);
4388 
4389 	trace_seq_printf(s, "%d ?\n", entry->type);
4390 
4391 	return trace_handle_return(s);
4392 }
4393 
4394 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4395 {
4396 	struct trace_array *tr = iter->tr;
4397 	struct trace_seq *s = &iter->seq;
4398 	unsigned char newline = '\n';
4399 	struct trace_entry *entry;
4400 	struct trace_event *event;
4401 
4402 	entry = iter->ent;
4403 
4404 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4405 		SEQ_PUT_HEX_FIELD(s, entry->pid);
4406 		SEQ_PUT_HEX_FIELD(s, iter->cpu);
4407 		SEQ_PUT_HEX_FIELD(s, iter->ts);
4408 		if (trace_seq_has_overflowed(s))
4409 			return TRACE_TYPE_PARTIAL_LINE;
4410 	}
4411 
4412 	event = ftrace_find_event(entry->type);
4413 	if (event) {
4414 		enum print_line_t ret = event->funcs->hex(iter, 0, event);
4415 		if (ret != TRACE_TYPE_HANDLED)
4416 			return ret;
4417 	}
4418 
4419 	SEQ_PUT_FIELD(s, newline);
4420 
4421 	return trace_handle_return(s);
4422 }
4423 
4424 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4425 {
4426 	struct trace_array *tr = iter->tr;
4427 	struct trace_seq *s = &iter->seq;
4428 	struct trace_entry *entry;
4429 	struct trace_event *event;
4430 
4431 	entry = iter->ent;
4432 
4433 	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4434 		SEQ_PUT_FIELD(s, entry->pid);
4435 		SEQ_PUT_FIELD(s, iter->cpu);
4436 		SEQ_PUT_FIELD(s, iter->ts);
4437 		if (trace_seq_has_overflowed(s))
4438 			return TRACE_TYPE_PARTIAL_LINE;
4439 	}
4440 
4441 	event = ftrace_find_event(entry->type);
4442 	return event ? event->funcs->binary(iter, 0, event) :
4443 		TRACE_TYPE_HANDLED;
4444 }
4445 
4446 int trace_empty(struct trace_iterator *iter)
4447 {
4448 	struct ring_buffer_iter *buf_iter;
4449 	int cpu;
4450 
4451 	/* If we are looking at one CPU buffer, only check that one */
4452 	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4453 		cpu = iter->cpu_file;
4454 		buf_iter = trace_buffer_iter(iter, cpu);
4455 		if (buf_iter) {
4456 			if (!ring_buffer_iter_empty(buf_iter))
4457 				return 0;
4458 		} else {
4459 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4460 				return 0;
4461 		}
4462 		return 1;
4463 	}
4464 
4465 	for_each_tracing_cpu(cpu) {
4466 		buf_iter = trace_buffer_iter(iter, cpu);
4467 		if (buf_iter) {
4468 			if (!ring_buffer_iter_empty(buf_iter))
4469 				return 0;
4470 		} else {
4471 			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4472 				return 0;
4473 		}
4474 	}
4475 
4476 	return 1;
4477 }
4478 
4479 /*  Called with trace_event_read_lock() held. */
4480 enum print_line_t print_trace_line(struct trace_iterator *iter)
4481 {
4482 	struct trace_array *tr = iter->tr;
4483 	unsigned long trace_flags = tr->trace_flags;
4484 	enum print_line_t ret;
4485 
4486 	if (iter->lost_events) {
4487 		if (iter->lost_events == (unsigned long)-1)
4488 			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4489 					 iter->cpu);
4490 		else
4491 			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4492 					 iter->cpu, iter->lost_events);
4493 		if (trace_seq_has_overflowed(&iter->seq))
4494 			return TRACE_TYPE_PARTIAL_LINE;
4495 	}
4496 
4497 	if (iter->trace && iter->trace->print_line) {
4498 		ret = iter->trace->print_line(iter);
4499 		if (ret != TRACE_TYPE_UNHANDLED)
4500 			return ret;
4501 	}
4502 
4503 	if (iter->ent->type == TRACE_BPUTS &&
4504 			trace_flags & TRACE_ITER_PRINTK &&
4505 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4506 		return trace_print_bputs_msg_only(iter);
4507 
4508 	if (iter->ent->type == TRACE_BPRINT &&
4509 			trace_flags & TRACE_ITER_PRINTK &&
4510 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4511 		return trace_print_bprintk_msg_only(iter);
4512 
4513 	if (iter->ent->type == TRACE_PRINT &&
4514 			trace_flags & TRACE_ITER_PRINTK &&
4515 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4516 		return trace_print_printk_msg_only(iter);
4517 
4518 	if (trace_flags & TRACE_ITER_BIN)
4519 		return print_bin_fmt(iter);
4520 
4521 	if (trace_flags & TRACE_ITER_HEX)
4522 		return print_hex_fmt(iter);
4523 
4524 	if (trace_flags & TRACE_ITER_RAW)
4525 		return print_raw_fmt(iter);
4526 
4527 	return print_trace_fmt(iter);
4528 }
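/*
 * Output format selection, in the order tried above: a [LOST ... EVENTS]
 * banner if the ring buffer reported dropped events, then the tracer's
 * own ->print_line() hook, then the printk-msg-only shortcuts for
 * TRACE_BPUTS/TRACE_BPRINT/TRACE_PRINT entries (when both
 * TRACE_ITER_PRINTK and TRACE_ITER_PRINTK_MSGONLY are set), and finally
 * the generic binary, hex, raw or default text formatters, depending on
 * the TRACE_ITER_BIN/HEX/RAW flags.
 */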
4529 
4530 void trace_latency_header(struct seq_file *m)
4531 {
4532 	struct trace_iterator *iter = m->private;
4533 	struct trace_array *tr = iter->tr;
4534 
4535 	/* print nothing if the buffers are empty */
4536 	if (trace_empty(iter))
4537 		return;
4538 
4539 	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4540 		print_trace_header(m, iter);
4541 
4542 	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4543 		print_lat_help_header(m);
4544 }
4545 
4546 void trace_default_header(struct seq_file *m)
4547 {
4548 	struct trace_iterator *iter = m->private;
4549 	struct trace_array *tr = iter->tr;
4550 	unsigned long trace_flags = tr->trace_flags;
4551 
4552 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4553 		return;
4554 
4555 	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4556 		/* print nothing if the buffers are empty */
4557 		if (trace_empty(iter))
4558 			return;
4559 		print_trace_header(m, iter);
4560 		if (!(trace_flags & TRACE_ITER_VERBOSE))
4561 			print_lat_help_header(m);
4562 	} else {
4563 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4564 			if (trace_flags & TRACE_ITER_IRQ_INFO)
4565 				print_func_help_header_irq(iter->array_buffer,
4566 							   m, trace_flags);
4567 			else
4568 				print_func_help_header(iter->array_buffer, m,
4569 						       trace_flags);
4570 		}
4571 	}
4572 }
4573 
4574 static void test_ftrace_alive(struct seq_file *m)
4575 {
4576 	if (!ftrace_is_dead())
4577 		return;
4578 	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4579 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
4580 }
4581 
4582 #ifdef CONFIG_TRACER_MAX_TRACE
4583 static void show_snapshot_main_help(struct seq_file *m)
4584 {
4585 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4586 		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4587 		    "#                      Takes a snapshot of the main buffer.\n"
4588 		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4589 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4590 		    "#                       is not a '0' or '1')\n");
4591 }
4592 
4593 static void show_snapshot_percpu_help(struct seq_file *m)
4594 {
4595 	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4596 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4597 	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4598 		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
4599 #else
4600 	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4601 		    "#                     Must use main snapshot file to allocate.\n");
4602 #endif
4603 	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4604 		    "#                      (Doesn't have to be '2'; works with any number that\n"
4605 		    "#                       is not a '0' or '1')\n");
4606 }
4607 
4608 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4609 {
4610 	if (iter->tr->allocated_snapshot)
4611 		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4612 	else
4613 		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4614 
4615 	seq_puts(m, "# Snapshot commands:\n");
4616 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4617 		show_snapshot_main_help(m);
4618 	else
4619 		show_snapshot_percpu_help(m);
4620 }
4621 #else
4622 /* Should never be called */
4623 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4624 #endif
4625 
4626 static int s_show(struct seq_file *m, void *v)
4627 {
4628 	struct trace_iterator *iter = v;
4629 	int ret;
4630 
4631 	if (iter->ent == NULL) {
4632 		if (iter->tr) {
4633 			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4634 			seq_puts(m, "#\n");
4635 			test_ftrace_alive(m);
4636 		}
4637 		if (iter->snapshot && trace_empty(iter))
4638 			print_snapshot_help(m, iter);
4639 		else if (iter->trace && iter->trace->print_header)
4640 			iter->trace->print_header(m);
4641 		else
4642 			trace_default_header(m);
4643 
4644 	} else if (iter->leftover) {
4645 		/*
4646 		 * If we filled the seq_file buffer earlier, we
4647 		 * want to just show it now.
4648 		 */
4649 		ret = trace_print_seq(m, &iter->seq);
4650 
4651 		/* ret should this time be zero, but you never know */
4652 		iter->leftover = ret;
4653 
4654 	} else {
4655 		print_trace_line(iter);
4656 		ret = trace_print_seq(m, &iter->seq);
4657 		/*
4658 		 * If we overflow the seq_file buffer, then it will
4659 		 * ask us for this data again at start up.
4660 		 * Use that instead.
4661 		 *  ret is 0 if seq_file write succeeded.
4662 		 *        -1 otherwise.
4663 		 */
4664 		iter->leftover = ret;
4665 	}
4666 
4667 	return 0;
4668 }
4669 
4670 /*
4671  * Should be used after trace_array_get(); trace_types_lock
4672  * ensures that i_cdev was already initialized.
4673  */
4674 static inline int tracing_get_cpu(struct inode *inode)
4675 {
4676 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4677 		return (long)inode->i_cdev - 1;
4678 	return RING_BUFFER_ALL_CPUS;
4679 }
4680 
4681 static const struct seq_operations tracer_seq_ops = {
4682 	.start		= s_start,
4683 	.next		= s_next,
4684 	.stop		= s_stop,
4685 	.show		= s_show,
4686 };
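/*
 * These seq_file operations back reads of the "trace" file: s_start()
 * takes the needed locks and positions the iterator at *pos, s_next()
 * advances entry by entry, s_show() renders either the header (no
 * current entry), a leftover line, or the next formatted trace line,
 * and s_stop() drops the locks again. See __tracing_open() below,
 * which wires them up through __seq_open_private().
 */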
4687 
4688 static struct trace_iterator *
4689 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4690 {
4691 	struct trace_array *tr = inode->i_private;
4692 	struct trace_iterator *iter;
4693 	int cpu;
4694 
4695 	if (tracing_disabled)
4696 		return ERR_PTR(-ENODEV);
4697 
4698 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4699 	if (!iter)
4700 		return ERR_PTR(-ENOMEM);
4701 
4702 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4703 				    GFP_KERNEL);
4704 	if (!iter->buffer_iter)
4705 		goto release;
4706 
4707 	/*
4708 	 * trace_find_next_entry() may need to save off iter->ent.
4709 	 * It will place it into the iter->temp buffer. As most
4710 	 * events are less than 128, allocate a buffer of that size.
4711 	 * If one is greater, then trace_find_next_entry() will
4712 	 * allocate a new buffer to adjust for the bigger iter->ent.
4713 	 * It's not critical if it fails to get allocated here.
4714 	 */
4715 	iter->temp = kmalloc(128, GFP_KERNEL);
4716 	if (iter->temp)
4717 		iter->temp_size = 128;
4718 
4719 	/*
4720 	 * trace_event_printf() may need to modify the given format
4721 	 * string to replace %p with %px so that the real address is
4722 	 * shown instead of a hashed value. However, that is only needed
4723 	 * for event tracing; other tracers may not need it. Defer the
4724 	 * allocation until it is needed.
4725 	 */
4726 	iter->fmt = NULL;
4727 	iter->fmt_size = 0;
4728 
4729 	/*
4730 	 * We make a copy of the current tracer to avoid concurrent
4731 	 * changes on it while we are reading.
4732 	 */
4733 	mutex_lock(&trace_types_lock);
4734 	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4735 	if (!iter->trace)
4736 		goto fail;
4737 
4738 	*iter->trace = *tr->current_trace;
4739 
4740 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4741 		goto fail;
4742 
4743 	iter->tr = tr;
4744 
4745 #ifdef CONFIG_TRACER_MAX_TRACE
4746 	/* Currently only the top directory has a snapshot */
4747 	if (tr->current_trace->print_max || snapshot)
4748 		iter->array_buffer = &tr->max_buffer;
4749 	else
4750 #endif
4751 		iter->array_buffer = &tr->array_buffer;
4752 	iter->snapshot = snapshot;
4753 	iter->pos = -1;
4754 	iter->cpu_file = tracing_get_cpu(inode);
4755 	mutex_init(&iter->mutex);
4756 
4757 	/* Notify the tracer early; before we stop tracing. */
4758 	if (iter->trace->open)
4759 		iter->trace->open(iter);
4760 
4761 	/* Annotate start of buffers if we had overruns */
4762 	if (ring_buffer_overruns(iter->array_buffer->buffer))
4763 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4764 
4765 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4766 	if (trace_clocks[tr->clock_id].in_ns)
4767 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4768 
4769 	/*
4770 	 * If pause-on-trace is enabled, then stop the trace while
4771 	 * dumping, unless this is the "snapshot" file
4772 	 */
4773 	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4774 		tracing_stop_tr(tr);
4775 
4776 	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4777 		for_each_tracing_cpu(cpu) {
4778 			iter->buffer_iter[cpu] =
4779 				ring_buffer_read_prepare(iter->array_buffer->buffer,
4780 							 cpu, GFP_KERNEL);
4781 		}
4782 		ring_buffer_read_prepare_sync();
4783 		for_each_tracing_cpu(cpu) {
4784 			ring_buffer_read_start(iter->buffer_iter[cpu]);
4785 			tracing_iter_reset(iter, cpu);
4786 		}
4787 	} else {
4788 		cpu = iter->cpu_file;
4789 		iter->buffer_iter[cpu] =
4790 			ring_buffer_read_prepare(iter->array_buffer->buffer,
4791 						 cpu, GFP_KERNEL);
4792 		ring_buffer_read_prepare_sync();
4793 		ring_buffer_read_start(iter->buffer_iter[cpu]);
4794 		tracing_iter_reset(iter, cpu);
4795 	}
4796 
4797 	mutex_unlock(&trace_types_lock);
4798 
4799 	return iter;
4800 
4801  fail:
4802 	mutex_unlock(&trace_types_lock);
4803 	kfree(iter->trace);
4804 	kfree(iter->temp);
4805 	kfree(iter->buffer_iter);
4806 release:
4807 	seq_release_private(inode, file);
4808 	return ERR_PTR(-ENOMEM);
4809 }
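/*
 * Note the two-step ring buffer setup above: every per-CPU iterator is
 * first created with ring_buffer_read_prepare(), then a single
 * ring_buffer_read_prepare_sync() is issued, and only afterwards is
 * each iterator activated with ring_buffer_read_start() and reset with
 * tracing_iter_reset(). The same sequence is used for both the
 * all-CPUs case and the single-CPU case.
 */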
4810 
4811 int tracing_open_generic(struct inode *inode, struct file *filp)
4812 {
4813 	int ret;
4814 
4815 	ret = tracing_check_open_get_tr(NULL);
4816 	if (ret)
4817 		return ret;
4818 
4819 	filp->private_data = inode->i_private;
4820 	return 0;
4821 }
4822 
4823 bool tracing_is_disabled(void)
4824 {
4825 	return tracing_disabled ? true : false;
4826 }
4827 
4828 /*
4829  * Open and update trace_array ref count.
4830  * Must have the current trace_array passed to it.
4831  */
4832 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4833 {
4834 	struct trace_array *tr = inode->i_private;
4835 	int ret;
4836 
4837 	ret = tracing_check_open_get_tr(tr);
4838 	if (ret)
4839 		return ret;
4840 
4841 	filp->private_data = inode->i_private;
4842 
4843 	return 0;
4844 }
4845 
4846 static int tracing_mark_open(struct inode *inode, struct file *filp)
4847 {
4848 	stream_open(inode, filp);
4849 	return tracing_open_generic_tr(inode, filp);
4850 }
4851 
4852 static int tracing_release(struct inode *inode, struct file *file)
4853 {
4854 	struct trace_array *tr = inode->i_private;
4855 	struct seq_file *m = file->private_data;
4856 	struct trace_iterator *iter;
4857 	int cpu;
4858 
4859 	if (!(file->f_mode & FMODE_READ)) {
4860 		trace_array_put(tr);
4861 		return 0;
4862 	}
4863 
4864 	/* Writes do not use seq_file */
4865 	iter = m->private;
4866 	mutex_lock(&trace_types_lock);
4867 
4868 	for_each_tracing_cpu(cpu) {
4869 		if (iter->buffer_iter[cpu])
4870 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4871 	}
4872 
4873 	if (iter->trace && iter->trace->close)
4874 		iter->trace->close(iter);
4875 
4876 	if (!iter->snapshot && tr->stop_count)
4877 		/* reenable tracing if it was previously enabled */
4878 		tracing_start_tr(tr);
4879 
4880 	__trace_array_put(tr);
4881 
4882 	mutex_unlock(&trace_types_lock);
4883 
4884 	mutex_destroy(&iter->mutex);
4885 	free_cpumask_var(iter->started);
4886 	kfree(iter->fmt);
4887 	kfree(iter->temp);
4888 	kfree(iter->trace);
4889 	kfree(iter->buffer_iter);
4890 	seq_release_private(inode, file);
4891 
4892 	return 0;
4893 }
4894 
4895 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4896 {
4897 	struct trace_array *tr = inode->i_private;
4898 
4899 	trace_array_put(tr);
4900 	return 0;
4901 }
4902 
4903 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4904 {
4905 	struct trace_array *tr = inode->i_private;
4906 
4907 	trace_array_put(tr);
4908 
4909 	return single_release(inode, file);
4910 }
4911 
4912 static int tracing_open(struct inode *inode, struct file *file)
4913 {
4914 	struct trace_array *tr = inode->i_private;
4915 	struct trace_iterator *iter;
4916 	int ret;
4917 
4918 	ret = tracing_check_open_get_tr(tr);
4919 	if (ret)
4920 		return ret;
4921 
4922 	/* If this file was open for write, then erase contents */
4923 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4924 		int cpu = tracing_get_cpu(inode);
4925 		struct array_buffer *trace_buf = &tr->array_buffer;
4926 
4927 #ifdef CONFIG_TRACER_MAX_TRACE
4928 		if (tr->current_trace->print_max)
4929 			trace_buf = &tr->max_buffer;
4930 #endif
4931 
4932 		if (cpu == RING_BUFFER_ALL_CPUS)
4933 			tracing_reset_online_cpus(trace_buf);
4934 		else
4935 			tracing_reset_cpu(trace_buf, cpu);
4936 	}
4937 
4938 	if (file->f_mode & FMODE_READ) {
4939 		iter = __tracing_open(inode, file, false);
4940 		if (IS_ERR(iter))
4941 			ret = PTR_ERR(iter);
4942 		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4943 			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4944 	}
4945 
4946 	if (ret < 0)
4947 		trace_array_put(tr);
4948 
4949 	return ret;
4950 }
4951 
4952 /*
4953  * Some tracers are not suitable for instance buffers.
4954  * A tracer is always available for the global array (toplevel)
4955  * or if it explicitly states that it is.
4956  */
4957 static bool
4958 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4959 {
4960 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4961 }
4962 
4963 /* Find the next tracer that this trace array may use */
4964 static struct tracer *
4965 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4966 {
4967 	while (t && !trace_ok_for_array(t, tr))
4968 		t = t->next;
4969 
4970 	return t;
4971 }
4972 
4973 static void *
4974 t_next(struct seq_file *m, void *v, loff_t *pos)
4975 {
4976 	struct trace_array *tr = m->private;
4977 	struct tracer *t = v;
4978 
4979 	(*pos)++;
4980 
4981 	if (t)
4982 		t = get_tracer_for_array(tr, t->next);
4983 
4984 	return t;
4985 }
4986 
4987 static void *t_start(struct seq_file *m, loff_t *pos)
4988 {
4989 	struct trace_array *tr = m->private;
4990 	struct tracer *t;
4991 	loff_t l = 0;
4992 
4993 	mutex_lock(&trace_types_lock);
4994 
4995 	t = get_tracer_for_array(tr, trace_types);
4996 	for (; t && l < *pos; t = t_next(m, t, &l))
4997 			;
4998 
4999 	return t;
5000 }
5001 
5002 static void t_stop(struct seq_file *m, void *p)
5003 {
5004 	mutex_unlock(&trace_types_lock);
5005 }
5006 
5007 static int t_show(struct seq_file *m, void *v)
5008 {
5009 	struct tracer *t = v;
5010 
5011 	if (!t)
5012 		return 0;
5013 
5014 	seq_puts(m, t->name);
5015 	if (t->next)
5016 		seq_putc(m, ' ');
5017 	else
5018 		seq_putc(m, '\n');
5019 
5020 	return 0;
5021 }
5022 
5023 static const struct seq_operations show_traces_seq_ops = {
5024 	.start		= t_start,
5025 	.next		= t_next,
5026 	.stop		= t_stop,
5027 	.show		= t_show,
5028 };
5029 
5030 static int show_traces_open(struct inode *inode, struct file *file)
5031 {
5032 	struct trace_array *tr = inode->i_private;
5033 	struct seq_file *m;
5034 	int ret;
5035 
5036 	ret = tracing_check_open_get_tr(tr);
5037 	if (ret)
5038 		return ret;
5039 
5040 	ret = seq_open(file, &show_traces_seq_ops);
5041 	if (ret) {
5042 		trace_array_put(tr);
5043 		return ret;
5044 	}
5045 
5046 	m = file->private_data;
5047 	m->private = tr;
5048 
5049 	return 0;
5050 }
5051 
5052 static int show_traces_release(struct inode *inode, struct file *file)
5053 {
5054 	struct trace_array *tr = inode->i_private;
5055 
5056 	trace_array_put(tr);
5057 	return seq_release(inode, file);
5058 }
5059 
5060 static ssize_t
5061 tracing_write_stub(struct file *filp, const char __user *ubuf,
5062 		   size_t count, loff_t *ppos)
5063 {
5064 	return count;
5065 }
5066 
5067 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5068 {
5069 	int ret;
5070 
5071 	if (file->f_mode & FMODE_READ)
5072 		ret = seq_lseek(file, offset, whence);
5073 	else
5074 		file->f_pos = ret = 0;
5075 
5076 	return ret;
5077 }
5078 
5079 static const struct file_operations tracing_fops = {
5080 	.open		= tracing_open,
5081 	.read		= seq_read,
5082 	.write		= tracing_write_stub,
5083 	.llseek		= tracing_lseek,
5084 	.release	= tracing_release,
5085 };
5086 
5087 static const struct file_operations show_traces_fops = {
5088 	.open		= show_traces_open,
5089 	.read		= seq_read,
5090 	.llseek		= seq_lseek,
5091 	.release	= show_traces_release,
5092 };
5093 
5094 static ssize_t
5095 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5096 		     size_t count, loff_t *ppos)
5097 {
5098 	struct trace_array *tr = file_inode(filp)->i_private;
5099 	char *mask_str;
5100 	int len;
5101 
5102 	len = snprintf(NULL, 0, "%*pb\n",
5103 		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
5104 	mask_str = kmalloc(len, GFP_KERNEL);
5105 	if (!mask_str)
5106 		return -ENOMEM;
5107 
5108 	len = snprintf(mask_str, len, "%*pb\n",
5109 		       cpumask_pr_args(tr->tracing_cpumask));
5110 	if (len >= count) {
5111 		count = -EINVAL;
5112 		goto out_err;
5113 	}
5114 	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5115 
5116 out_err:
5117 	kfree(mask_str);
5118 
5119 	return count;
5120 }
5121 
5122 int tracing_set_cpumask(struct trace_array *tr,
5123 			cpumask_var_t tracing_cpumask_new)
5124 {
5125 	int cpu;
5126 
5127 	if (!tr)
5128 		return -EINVAL;
5129 
5130 	local_irq_disable();
5131 	arch_spin_lock(&tr->max_lock);
5132 	for_each_tracing_cpu(cpu) {
5133 		/*
5134 		 * Increase/decrease the disabled counter if we are
5135 		 * about to flip a bit in the cpumask:
5136 		 */
5137 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5138 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5139 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5140 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5141 		}
5142 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5143 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5144 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5145 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5146 		}
5147 	}
5148 	arch_spin_unlock(&tr->max_lock);
5149 	local_irq_enable();
5150 
5151 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
5152 
5153 	return 0;
5154 }
5155 
5156 static ssize_t
5157 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5158 		      size_t count, loff_t *ppos)
5159 {
5160 	struct trace_array *tr = file_inode(filp)->i_private;
5161 	cpumask_var_t tracing_cpumask_new;
5162 	int err;
5163 
5164 	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5165 		return -ENOMEM;
5166 
5167 	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5168 	if (err)
5169 		goto err_free;
5170 
5171 	err = tracing_set_cpumask(tr, tracing_cpumask_new);
5172 	if (err)
5173 		goto err_free;
5174 
5175 	free_cpumask_var(tracing_cpumask_new);
5176 
5177 	return count;
5178 
5179 err_free:
5180 	free_cpumask_var(tracing_cpumask_new);
5181 
5182 	return err;
5183 }
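/*
 * Example usage of the tracing_cpumask file (illustrative): the write
 * side parses a hex CPU mask via cpumask_parse_user(), so, assuming the
 * usual bitmap syntax,
 *
 *	echo 3 > tracing_cpumask
 *
 * would limit tracing to CPUs 0-1. The buffers of CPUs being removed
 * from the mask are disabled via ring_buffer_record_disable_cpu() and
 * re-enabled when their bit is set again.
 */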
5184 
5185 static const struct file_operations tracing_cpumask_fops = {
5186 	.open		= tracing_open_generic_tr,
5187 	.read		= tracing_cpumask_read,
5188 	.write		= tracing_cpumask_write,
5189 	.release	= tracing_release_generic_tr,
5190 	.llseek		= generic_file_llseek,
5191 };
5192 
5193 static int tracing_trace_options_show(struct seq_file *m, void *v)
5194 {
5195 	struct tracer_opt *trace_opts;
5196 	struct trace_array *tr = m->private;
5197 	u32 tracer_flags;
5198 	int i;
5199 
5200 	mutex_lock(&trace_types_lock);
5201 	tracer_flags = tr->current_trace->flags->val;
5202 	trace_opts = tr->current_trace->flags->opts;
5203 
5204 	for (i = 0; trace_options[i]; i++) {
5205 		if (tr->trace_flags & (1 << i))
5206 			seq_printf(m, "%s\n", trace_options[i]);
5207 		else
5208 			seq_printf(m, "no%s\n", trace_options[i]);
5209 	}
5210 
5211 	for (i = 0; trace_opts[i].name; i++) {
5212 		if (tracer_flags & trace_opts[i].bit)
5213 			seq_printf(m, "%s\n", trace_opts[i].name);
5214 		else
5215 			seq_printf(m, "no%s\n", trace_opts[i].name);
5216 	}
5217 	mutex_unlock(&trace_types_lock);
5218 
5219 	return 0;
5220 }
5221 
5222 static int __set_tracer_option(struct trace_array *tr,
5223 			       struct tracer_flags *tracer_flags,
5224 			       struct tracer_opt *opts, int neg)
5225 {
5226 	struct tracer *trace = tracer_flags->trace;
5227 	int ret;
5228 
5229 	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5230 	if (ret)
5231 		return ret;
5232 
5233 	if (neg)
5234 		tracer_flags->val &= ~opts->bit;
5235 	else
5236 		tracer_flags->val |= opts->bit;
5237 	return 0;
5238 }
5239 
5240 /* Try to assign a tracer specific option */
5241 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5242 {
5243 	struct tracer *trace = tr->current_trace;
5244 	struct tracer_flags *tracer_flags = trace->flags;
5245 	struct tracer_opt *opts = NULL;
5246 	int i;
5247 
5248 	for (i = 0; tracer_flags->opts[i].name; i++) {
5249 		opts = &tracer_flags->opts[i];
5250 
5251 		if (strcmp(cmp, opts->name) == 0)
5252 			return __set_tracer_option(tr, trace->flags, opts, neg);
5253 	}
5254 
5255 	return -EINVAL;
5256 }
5257 
5258 /* Some tracers require overwrite to stay enabled */
5259 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5260 {
5261 	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5262 		return -1;
5263 
5264 	return 0;
5265 }
5266 
5267 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5268 {
5269 	int *map;
5270 
5271 	if ((mask == TRACE_ITER_RECORD_TGID) ||
5272 	    (mask == TRACE_ITER_RECORD_CMD))
5273 		lockdep_assert_held(&event_mutex);
5274 
5275 	/* do nothing if flag is already set */
5276 	if (!!(tr->trace_flags & mask) == !!enabled)
5277 		return 0;
5278 
5279 	/* Give the tracer a chance to approve the change */
5280 	if (tr->current_trace->flag_changed)
5281 		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5282 			return -EINVAL;
5283 
5284 	if (enabled)
5285 		tr->trace_flags |= mask;
5286 	else
5287 		tr->trace_flags &= ~mask;
5288 
5289 	if (mask == TRACE_ITER_RECORD_CMD)
5290 		trace_event_enable_cmd_record(enabled);
5291 
5292 	if (mask == TRACE_ITER_RECORD_TGID) {
5293 		if (!tgid_map) {
5294 			tgid_map_max = pid_max;
5295 			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5296 				       GFP_KERNEL);
5297 
5298 			/*
5299 			 * Pairs with smp_load_acquire() in
5300 			 * trace_find_tgid_ptr() to ensure that if it observes
5301 			 * the tgid_map we just allocated then it also observes
5302 			 * the corresponding tgid_map_max value.
5303 			 */
5304 			smp_store_release(&tgid_map, map);
5305 		}
5306 		if (!tgid_map) {
5307 			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5308 			return -ENOMEM;
5309 		}
5310 
5311 		trace_event_enable_tgid_record(enabled);
5312 	}
5313 
5314 	if (mask == TRACE_ITER_EVENT_FORK)
5315 		trace_event_follow_fork(tr, enabled);
5316 
5317 	if (mask == TRACE_ITER_FUNC_FORK)
5318 		ftrace_pid_follow_fork(tr, enabled);
5319 
5320 	if (mask == TRACE_ITER_OVERWRITE) {
5321 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5322 #ifdef CONFIG_TRACER_MAX_TRACE
5323 		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5324 #endif
5325 	}
5326 
5327 	if (mask == TRACE_ITER_PRINTK) {
5328 		trace_printk_start_stop_comm(enabled);
5329 		trace_printk_control(enabled);
5330 	}
5331 
5332 	return 0;
5333 }
5334 
5335 int trace_set_options(struct trace_array *tr, char *option)
5336 {
5337 	char *cmp;
5338 	int neg = 0;
5339 	int ret;
5340 	size_t orig_len = strlen(option);
5341 	int len;
5342 
5343 	cmp = strstrip(option);
5344 
5345 	len = str_has_prefix(cmp, "no");
5346 	if (len)
5347 		neg = 1;
5348 
5349 	cmp += len;
5350 
5351 	mutex_lock(&event_mutex);
5352 	mutex_lock(&trace_types_lock);
5353 
5354 	ret = match_string(trace_options, -1, cmp);
5355 	/* If it is not a global option, test the tracer-specific options */
5356 	if (ret < 0)
5357 		ret = set_tracer_option(tr, cmp, neg);
5358 	else
5359 		ret = set_tracer_flag(tr, 1 << ret, !neg);
5360 
5361 	mutex_unlock(&trace_types_lock);
5362 	mutex_unlock(&event_mutex);
5363 
5364 	/*
5365 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
5366 	 * turn it back into a space.
5367 	 */
5368 	if (orig_len > strlen(option))
5369 		option[strlen(option)] = ' ';
5370 
5371 	return ret;
5372 }
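/*
 * Illustration of the option parsing above: a string such as
 * "nooverwrite" is stripped of its "no" prefix (neg = 1), matched
 * against the global trace_options[] names first, and only if that
 * fails handed to the current tracer's own flags via
 * set_tracer_option(). "overwrite" is used here purely as an example
 * of a flag name; the authoritative list is the trace_options[] array.
 */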
5373 
5374 static void __init apply_trace_boot_options(void)
5375 {
5376 	char *buf = trace_boot_options_buf;
5377 	char *option;
5378 
5379 	while (true) {
5380 		option = strsep(&buf, ",");
5381 
5382 		if (!option)
5383 			break;
5384 
5385 		if (*option)
5386 			trace_set_options(&global_trace, option);
5387 
5388 		/* Put back the comma to allow this to be called again */
5389 		if (buf)
5390 			*(buf - 1) = ',';
5391 	}
5392 }
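/*
 * trace_boot_options_buf is filled from the kernel command line (the
 * "trace_options=" parameter, handled earlier in this file), so for
 * example booting with
 *
 *	trace_options=sym-offset,nooverwrite
 *
 * walks the comma-separated list above and applies each entry to the
 * global trace array. The option names shown are only examples.
 */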
5393 
5394 static ssize_t
5395 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5396 			size_t cnt, loff_t *ppos)
5397 {
5398 	struct seq_file *m = filp->private_data;
5399 	struct trace_array *tr = m->private;
5400 	char buf[64];
5401 	int ret;
5402 
5403 	if (cnt >= sizeof(buf))
5404 		return -EINVAL;
5405 
5406 	if (copy_from_user(buf, ubuf, cnt))
5407 		return -EFAULT;
5408 
5409 	buf[cnt] = 0;
5410 
5411 	ret = trace_set_options(tr, buf);
5412 	if (ret < 0)
5413 		return ret;
5414 
5415 	*ppos += cnt;
5416 
5417 	return cnt;
5418 }
5419 
5420 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5421 {
5422 	struct trace_array *tr = inode->i_private;
5423 	int ret;
5424 
5425 	ret = tracing_check_open_get_tr(tr);
5426 	if (ret)
5427 		return ret;
5428 
5429 	ret = single_open(file, tracing_trace_options_show, inode->i_private);
5430 	if (ret < 0)
5431 		trace_array_put(tr);
5432 
5433 	return ret;
5434 }
5435 
5436 static const struct file_operations tracing_iter_fops = {
5437 	.open		= tracing_trace_options_open,
5438 	.read		= seq_read,
5439 	.llseek		= seq_lseek,
5440 	.release	= tracing_single_release_tr,
5441 	.write		= tracing_trace_options_write,
5442 };
5443 
5444 static const char readme_msg[] =
5445 	"tracing mini-HOWTO:\n\n"
5446 	"# echo 0 > tracing_on : quick way to disable tracing\n"
5447 	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5448 	" Important files:\n"
5449 	"  trace\t\t\t- The static contents of the buffer\n"
5450 	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
5451 	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5452 	"  current_tracer\t- function and latency tracers\n"
5453 	"  available_tracers\t- list of configured tracers for current_tracer\n"
5454 	"  error_log\t- error log for failed commands (that support it)\n"
5455 	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
5456 	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
5457 	"  trace_clock\t\t- change the clock used to order events\n"
5458 	"       local:   Per cpu clock but may not be synced across CPUs\n"
5459 	"      global:   Synced across CPUs but slows tracing down.\n"
5460 	"     counter:   Not a clock, but just an increment\n"
5461 	"      uptime:   Jiffy counter from time of boot\n"
5462 	"        perf:   Same clock that perf events use\n"
5463 #ifdef CONFIG_X86_64
5464 	"     x86-tsc:   TSC cycle counter\n"
5465 #endif
5466 	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
5467 	"       delta:   Delta difference against a buffer-wide timestamp\n"
5468 	"    absolute:   Absolute (standalone) timestamp\n"
5469 	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5470 	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5471 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
5472 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5473 	"\t\t\t  Remove sub-buffer with rmdir\n"
5474 	"  trace_options\t\t- Set format or modify how tracing happens\n"
5475 	"\t\t\t  Disable an option by prefixing 'no' to the\n"
5476 	"\t\t\t  option name\n"
5477 	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5478 #ifdef CONFIG_DYNAMIC_FTRACE
5479 	"\n  available_filter_functions - list of functions that can be filtered on\n"
5480 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
5481 	"\t\t\t  functions\n"
5482 	"\t     accepts: func_full_name or glob-matching-pattern\n"
5483 	"\t     modules: Can select a group via module\n"
5484 	"\t      Format: :mod:<module-name>\n"
5485 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
5486 	"\t    triggers: a command to perform when function is hit\n"
5487 	"\t      Format: <function>:<trigger>[:count]\n"
5488 	"\t     trigger: traceon, traceoff\n"
5489 	"\t\t      enable_event:<system>:<event>\n"
5490 	"\t\t      disable_event:<system>:<event>\n"
5491 #ifdef CONFIG_STACKTRACE
5492 	"\t\t      stacktrace\n"
5493 #endif
5494 #ifdef CONFIG_TRACER_SNAPSHOT
5495 	"\t\t      snapshot\n"
5496 #endif
5497 	"\t\t      dump\n"
5498 	"\t\t      cpudump\n"
5499 	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
5500 	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
5501 	"\t     The first one will disable tracing every time do_fault is hit\n"
5502 	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5503 	"\t       The first time do_trap is hit and it disables tracing, the\n"
5504 	"\t       counter will decrement to 2. If tracing is already disabled,\n"
5505 	"\t       the counter will not decrement. It only decrements when the\n"
5506 	"\t       trigger did work\n"
5507 	"\t     To remove trigger without count:\n"
5508 	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
5509 	"\t     To remove trigger with a count:\n"
5510 	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5511 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
5512 	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5513 	"\t    modules: Can select a group via module command :mod:\n"
5514 	"\t    Does not accept triggers\n"
5515 #endif /* CONFIG_DYNAMIC_FTRACE */
5516 #ifdef CONFIG_FUNCTION_TRACER
5517 	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5518 	"\t\t    (function)\n"
5519 	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5520 	"\t\t    (function)\n"
5521 #endif
5522 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5523 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5524 	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5525 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5526 #endif
5527 #ifdef CONFIG_TRACER_SNAPSHOT
5528 	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
5529 	"\t\t\t  snapshot buffer. Read the contents for more\n"
5530 	"\t\t\t  information\n"
5531 #endif
5532 #ifdef CONFIG_STACK_TRACER
5533 	"  stack_trace\t\t- Shows the max stack trace when active\n"
5534 	"  stack_max_size\t- Shows current max stack size that was traced\n"
5535 	"\t\t\t  Write into this file to reset the max size (trigger a\n"
5536 	"\t\t\t  new trace)\n"
5537 #ifdef CONFIG_DYNAMIC_FTRACE
5538 	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5539 	"\t\t\t  traces\n"
5540 #endif
5541 #endif /* CONFIG_STACK_TRACER */
5542 #ifdef CONFIG_DYNAMIC_EVENTS
5543 	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5544 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5545 #endif
5546 #ifdef CONFIG_KPROBE_EVENTS
5547 	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5548 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5549 #endif
5550 #ifdef CONFIG_UPROBE_EVENTS
5551 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5552 	"\t\t\t  Write into this file to define/undefine new trace events.\n"
5553 #endif
5554 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5555 	"\t  accepts: event-definitions (one definition per line)\n"
5556 	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
5557 	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5558 #ifdef CONFIG_HIST_TRIGGERS
5559 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
5560 #endif
5561 	"\t           e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
5562 	"\t           -:[<group>/]<event>\n"
5563 #ifdef CONFIG_KPROBE_EVENTS
5564 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5565 	"\t    place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5566 #endif
5567 #ifdef CONFIG_UPROBE_EVENTS
5568 	"\t    place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5569 #endif
5570 	"\t     args: <name>=fetcharg[:type]\n"
5571 	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5572 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5573 	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5574 #else
5575 	"\t           $stack<index>, $stack, $retval, $comm,\n"
5576 #endif
5577 	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5578 	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5579 	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5580 	"\t           <type>\\[<array-size>\\]\n"
5581 #ifdef CONFIG_HIST_TRIGGERS
5582 	"\t    field: <stype> <name>;\n"
5583 	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5584 	"\t           [unsigned] char/int/long\n"
5585 #endif
5586 	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
5587 	"\t            of the <attached-group>/<attached-event>.\n"
5588 #endif
5589 	"  events/\t\t- Directory containing all trace event subsystems:\n"
5590 	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5591 	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
5592 	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5593 	"\t\t\t  events\n"
5594 	"      filter\t\t- If set, only events passing filter are traced\n"
5595 	"  events/<system>/<event>/\t- Directory containing control files for\n"
5596 	"\t\t\t  <event>:\n"
5597 	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5598 	"      filter\t\t- If set, only events passing filter are traced\n"
5599 	"      trigger\t\t- If set, a command to perform when event is hit\n"
5600 	"\t    Format: <trigger>[:count][if <filter>]\n"
5601 	"\t   trigger: traceon, traceoff\n"
5602 	"\t            enable_event:<system>:<event>\n"
5603 	"\t            disable_event:<system>:<event>\n"
5604 #ifdef CONFIG_HIST_TRIGGERS
5605 	"\t            enable_hist:<system>:<event>\n"
5606 	"\t            disable_hist:<system>:<event>\n"
5607 #endif
5608 #ifdef CONFIG_STACKTRACE
5609 	"\t\t    stacktrace\n"
5610 #endif
5611 #ifdef CONFIG_TRACER_SNAPSHOT
5612 	"\t\t    snapshot\n"
5613 #endif
5614 #ifdef CONFIG_HIST_TRIGGERS
5615 	"\t\t    hist (see below)\n"
5616 #endif
5617 	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
5618 	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
5619 	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5620 	"\t                  events/block/block_unplug/trigger\n"
5621 	"\t   The first disables tracing every time block_unplug is hit.\n"
5622 	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
5623 	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5624 	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5625 	"\t   Like function triggers, the counter is only decremented if it\n"
5626 	"\t    enabled or disabled tracing.\n"
5627 	"\t   To remove a trigger without a count:\n"
5628 	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
5629 	"\t   To remove a trigger with a count:\n"
5630 	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
5631 	"\t   Filters can be ignored when removing a trigger.\n"
5632 #ifdef CONFIG_HIST_TRIGGERS
5633 	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
5634 	"\t    Format: hist:keys=<field1[,field2,...]>\n"
5635 	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5636 	"\t            [:values=<field1[,field2,...]>]\n"
5637 	"\t            [:sort=<field1[,field2,...]>]\n"
5638 	"\t            [:size=#entries]\n"
5639 	"\t            [:pause][:continue][:clear]\n"
5640 	"\t            [:name=histname1]\n"
5641 	"\t            [:<handler>.<action>]\n"
5642 	"\t            [if <filter>]\n\n"
5643 	"\t    Note, special fields can be used as well:\n"
5644 	"\t            common_timestamp - to record current timestamp\n"
5645 	"\t            common_cpu - to record the CPU the event happened on\n"
5646 	"\n"
5647 	"\t    A hist trigger variable can be:\n"
5648 	"\t        - a reference to a field e.g. x=current_timestamp,\n"
5649 	"\t        - a reference to another variable e.g. y=$x,\n"
5650 	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
5651 	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5652 	"\n"
5653 	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5654 	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
5655 	"\t    variable reference, field or numeric literal.\n"
5656 	"\n"
5657 	"\t    When a matching event is hit, an entry is added to a hash\n"
5658 	"\t    table using the key(s) and value(s) named, and the value of a\n"
5659 	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
5660 	"\t    correspond to fields in the event's format description.  Keys\n"
5661 	"\t    can be any field, or the special string 'stacktrace'.\n"
5662 	"\t    Compound keys consisting of up to two fields can be specified\n"
5663 	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
5664 	"\t    fields.  Sort keys consisting of up to two fields can be\n"
5665 	"\t    specified using the 'sort' keyword.  The sort direction can\n"
5666 	"\t    be modified by appending '.descending' or '.ascending' to a\n"
5667 	"\t    sort field.  The 'size' parameter can be used to specify more\n"
5668 	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
5669 	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
5670 	"\t    its histogram data will be shared with other triggers of the\n"
5671 	"\t    same name, and trigger hits will update this common data.\n\n"
5672 	"\t    Reading the 'hist' file for the event will dump the hash\n"
5673 	"\t    table in its entirety to stdout.  If there are multiple hist\n"
5674 	"\t    triggers attached to an event, there will be a table for each\n"
5675 	"\t    trigger in the output.  The table displayed for a named\n"
5676 	"\t    trigger will be the same as any other instance having the\n"
5677 	"\t    same name.  The default format used to display a given field\n"
5678 	"\t    can be modified by appending any of the following modifiers\n"
5679 	"\t    to the field name, as applicable:\n\n"
5680 	"\t            .hex        display a number as a hex value\n"
5681 	"\t            .sym        display an address as a symbol\n"
5682 	"\t            .sym-offset display an address as a symbol and offset\n"
5683 	"\t            .execname   display a common_pid as a program name\n"
5684 	"\t            .syscall    display a syscall id as a syscall name\n"
5685 	"\t            .log2       display log2 value rather than raw number\n"
5686 	"\t            .buckets=size  display values in groups of size rather than raw number\n"
5687 	"\t            .usecs      display a common_timestamp in microseconds\n\n"
5688 	"\t    The 'pause' parameter can be used to pause an existing hist\n"
5689 	"\t    trigger or to start a hist trigger but not log any events\n"
5690 	"\t    until told to do so.  'continue' can be used to start or\n"
5691 	"\t    restart a paused hist trigger.\n\n"
5692 	"\t    The 'clear' parameter will clear the contents of a running\n"
5693 	"\t    hist trigger and leave its current paused/active state\n"
5694 	"\t    unchanged.\n\n"
5695 	"\t    The enable_hist and disable_hist triggers can be used to\n"
5696 	"\t    have one event conditionally start and stop another event's\n"
5697 	"\t    already-attached hist trigger.  The syntax is analogous to\n"
5698 	"\t    the enable_event and disable_event triggers.\n\n"
5699 	"\t    Hist trigger handlers and actions are executed whenever a\n"
5700 	"\t    histogram entry is added or updated.  They take the form:\n\n"
5701 	"\t        <handler>.<action>\n\n"
5702 	"\t    The available handlers are:\n\n"
5703 	"\t        onmatch(matching.event)  - invoke on addition or update\n"
5704 	"\t        onmax(var)               - invoke if var exceeds current max\n"
5705 	"\t        onchange(var)            - invoke action if var changes\n\n"
5706 	"\t    The available actions are:\n\n"
5707 	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
5708 	"\t        save(field,...)                      - save current event fields\n"
5709 #ifdef CONFIG_TRACER_SNAPSHOT
5710 	"\t        snapshot()                           - snapshot the trace buffer\n\n"
5711 #endif
5712 #ifdef CONFIG_SYNTH_EVENTS
5713 	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5714 	"\t  Write into this file to define/undefine new synthetic events.\n"
5715 	"\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5716 #endif
5717 #endif
5718 ;
5719 
5720 static ssize_t
5721 tracing_readme_read(struct file *filp, char __user *ubuf,
5722 		       size_t cnt, loff_t *ppos)
5723 {
5724 	return simple_read_from_buffer(ubuf, cnt, ppos,
5725 					readme_msg, strlen(readme_msg));
5726 }
5727 
5728 static const struct file_operations tracing_readme_fops = {
5729 	.open		= tracing_open_generic,
5730 	.read		= tracing_readme_read,
5731 	.llseek		= generic_file_llseek,
5732 };
5733 
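/*
 * seq_file interface for the "saved_tgids" tracefs file. It walks the
 * pid -> tgid map that is filled in while the "record-tgid" trace
 * option is set, printing one "pid tgid" pair per line and skipping
 * unused slots. A minimal usage sketch from userspace (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/options/record-tgid
 *   cat /sys/kernel/tracing/saved_tgids
 */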
5734 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5735 {
5736 	int pid = ++(*pos);
5737 
5738 	return trace_find_tgid_ptr(pid);
5739 }
5740 
5741 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5742 {
5743 	int pid = *pos;
5744 
5745 	return trace_find_tgid_ptr(pid);
5746 }
5747 
5748 static void saved_tgids_stop(struct seq_file *m, void *v)
5749 {
5750 }
5751 
5752 static int saved_tgids_show(struct seq_file *m, void *v)
5753 {
5754 	int *entry = (int *)v;
5755 	int pid = entry - tgid_map;
5756 	int tgid = *entry;
5757 
5758 	if (tgid == 0)
5759 		return SEQ_SKIP;
5760 
5761 	seq_printf(m, "%d %d\n", pid, tgid);
5762 	return 0;
5763 }
5764 
5765 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5766 	.start		= saved_tgids_start,
5767 	.stop		= saved_tgids_stop,
5768 	.next		= saved_tgids_next,
5769 	.show		= saved_tgids_show,
5770 };
5771 
5772 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5773 {
5774 	int ret;
5775 
5776 	ret = tracing_check_open_get_tr(NULL);
5777 	if (ret)
5778 		return ret;
5779 
5780 	return seq_open(filp, &tracing_saved_tgids_seq_ops);
5781 }
5782 
5783 
5784 static const struct file_operations tracing_saved_tgids_fops = {
5785 	.open		= tracing_saved_tgids_open,
5786 	.read		= seq_read,
5787 	.llseek		= seq_lseek,
5788 	.release	= seq_release,
5789 };
5790 
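/*
 * seq_file interface for the "saved_cmdlines" tracefs file. The
 * iterator walks savedcmd->map_cmdline_to_pid under trace_cmdline_lock
 * (with preemption disabled), skipping unused slots, and the show
 * callback prints one "pid comm" pair per line. Usage sketch (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/saved_cmdlines
 */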
5791 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5792 {
5793 	unsigned int *ptr = v;
5794 
5795 	if (*pos || m->count)
5796 		ptr++;
5797 
5798 	(*pos)++;
5799 
5800 	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5801 	     ptr++) {
5802 		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5803 			continue;
5804 
5805 		return ptr;
5806 	}
5807 
5808 	return NULL;
5809 }
5810 
5811 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5812 {
5813 	void *v;
5814 	loff_t l = 0;
5815 
5816 	preempt_disable();
5817 	arch_spin_lock(&trace_cmdline_lock);
5818 
5819 	v = &savedcmd->map_cmdline_to_pid[0];
5820 	while (l <= *pos) {
5821 		v = saved_cmdlines_next(m, v, &l);
5822 		if (!v)
5823 			return NULL;
5824 	}
5825 
5826 	return v;
5827 }
5828 
5829 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5830 {
5831 	arch_spin_unlock(&trace_cmdline_lock);
5832 	preempt_enable();
5833 }
5834 
5835 static int saved_cmdlines_show(struct seq_file *m, void *v)
5836 {
5837 	char buf[TASK_COMM_LEN];
5838 	unsigned int *pid = v;
5839 
5840 	__trace_find_cmdline(*pid, buf);
5841 	seq_printf(m, "%d %s\n", *pid, buf);
5842 	return 0;
5843 }
5844 
5845 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5846 	.start		= saved_cmdlines_start,
5847 	.next		= saved_cmdlines_next,
5848 	.stop		= saved_cmdlines_stop,
5849 	.show		= saved_cmdlines_show,
5850 };
5851 
5852 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5853 {
5854 	int ret;
5855 
5856 	ret = tracing_check_open_get_tr(NULL);
5857 	if (ret)
5858 		return ret;
5859 
5860 	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5861 }
5862 
5863 static const struct file_operations tracing_saved_cmdlines_fops = {
5864 	.open		= tracing_saved_cmdlines_open,
5865 	.read		= seq_read,
5866 	.llseek		= seq_lseek,
5867 	.release	= seq_release,
5868 };
5869 
5870 static ssize_t
5871 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5872 				 size_t cnt, loff_t *ppos)
5873 {
5874 	char buf[64];
5875 	int r;
5876 
5877 	arch_spin_lock(&trace_cmdline_lock);
5878 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5879 	arch_spin_unlock(&trace_cmdline_lock);
5880 
5881 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5882 }
5883 
5884 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5885 {
5886 	kfree(s->saved_cmdlines);
5887 	kfree(s->map_cmdline_to_pid);
5888 	kfree(s);
5889 }
5890 
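/*
 * Replace the global savedcmd buffer with one that can hold @val
 * entries: allocate the new buffer first, swap the pointer under
 * trace_cmdline_lock, then free the old buffer outside the lock.
 * This backs writes to the "saved_cmdlines_size" file, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 */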
5891 static int tracing_resize_saved_cmdlines(unsigned int val)
5892 {
5893 	struct saved_cmdlines_buffer *s, *savedcmd_temp;
5894 
5895 	s = kmalloc(sizeof(*s), GFP_KERNEL);
5896 	if (!s)
5897 		return -ENOMEM;
5898 
5899 	if (allocate_cmdlines_buffer(val, s) < 0) {
5900 		kfree(s);
5901 		return -ENOMEM;
5902 	}
5903 
5904 	arch_spin_lock(&trace_cmdline_lock);
5905 	savedcmd_temp = savedcmd;
5906 	savedcmd = s;
5907 	arch_spin_unlock(&trace_cmdline_lock);
5908 	free_saved_cmdlines_buffer(savedcmd_temp);
5909 
5910 	return 0;
5911 }
5912 
5913 static ssize_t
5914 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5915 				  size_t cnt, loff_t *ppos)
5916 {
5917 	unsigned long val;
5918 	int ret;
5919 
5920 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5921 	if (ret)
5922 		return ret;
5923 
5924 	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5925 	if (!val || val > PID_MAX_DEFAULT)
5926 		return -EINVAL;
5927 
5928 	ret = tracing_resize_saved_cmdlines((unsigned int)val);
5929 	if (ret < 0)
5930 		return ret;
5931 
5932 	*ppos += cnt;
5933 
5934 	return cnt;
5935 }
5936 
5937 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5938 	.open		= tracing_open_generic,
5939 	.read		= tracing_saved_cmdlines_size_read,
5940 	.write		= tracing_saved_cmdlines_size_write,
5941 };
5942 
5943 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
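/*
 * The trace_eval_maps list is a chain of arrays, each laid out as
 * [head][map 0]...[map len-1][tail]: the head records the owning
 * module and the array length, and the tail points to the next array
 * (see trace_insert_eval_map_file() below). update_eval_map() steps
 * over those bookkeeping entries so the seq_file iterator only ever
 * hands real eval_string/eval_value entries to eval_map_show().
 */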
5944 static union trace_eval_map_item *
5945 update_eval_map(union trace_eval_map_item *ptr)
5946 {
5947 	if (!ptr->map.eval_string) {
5948 		if (ptr->tail.next) {
5949 			ptr = ptr->tail.next;
5950 			/* Set ptr to the next real item (skip head) */
5951 			ptr++;
5952 		} else
5953 			return NULL;
5954 	}
5955 	return ptr;
5956 }
5957 
5958 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5959 {
5960 	union trace_eval_map_item *ptr = v;
5961 
5962 	/*
5963 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5964 	 * This really should never happen.
5965 	 */
5966 	(*pos)++;
5967 	ptr = update_eval_map(ptr);
5968 	if (WARN_ON_ONCE(!ptr))
5969 		return NULL;
5970 
5971 	ptr++;
5972 	ptr = update_eval_map(ptr);
5973 
5974 	return ptr;
5975 }
5976 
5977 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5978 {
5979 	union trace_eval_map_item *v;
5980 	loff_t l = 0;
5981 
5982 	mutex_lock(&trace_eval_mutex);
5983 
5984 	v = trace_eval_maps;
5985 	if (v)
5986 		v++;
5987 
5988 	while (v && l < *pos) {
5989 		v = eval_map_next(m, v, &l);
5990 	}
5991 
5992 	return v;
5993 }
5994 
5995 static void eval_map_stop(struct seq_file *m, void *v)
5996 {
5997 	mutex_unlock(&trace_eval_mutex);
5998 }
5999 
6000 static int eval_map_show(struct seq_file *m, void *v)
6001 {
6002 	union trace_eval_map_item *ptr = v;
6003 
6004 	seq_printf(m, "%s %ld (%s)\n",
6005 		   ptr->map.eval_string, ptr->map.eval_value,
6006 		   ptr->map.system);
6007 
6008 	return 0;
6009 }
6010 
6011 static const struct seq_operations tracing_eval_map_seq_ops = {
6012 	.start		= eval_map_start,
6013 	.next		= eval_map_next,
6014 	.stop		= eval_map_stop,
6015 	.show		= eval_map_show,
6016 };
6017 
6018 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6019 {
6020 	int ret;
6021 
6022 	ret = tracing_check_open_get_tr(NULL);
6023 	if (ret)
6024 		return ret;
6025 
6026 	return seq_open(filp, &tracing_eval_map_seq_ops);
6027 }
6028 
6029 static const struct file_operations tracing_eval_map_fops = {
6030 	.open		= tracing_eval_map_open,
6031 	.read		= seq_read,
6032 	.llseek		= seq_lseek,
6033 	.release	= seq_release,
6034 };
6035 
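/*
 * Reading the "eval_map" file dumps every registered enum/sizeof
 * mapping in the "<string> <value> (<system>)" format produced by
 * eval_map_show() above. The exact contents depend on the kernel
 * configuration; an illustrative (hypothetical) line:
 *
 *   HI_SOFTIRQ 0 (irq)
 */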
6036 static inline union trace_eval_map_item *
6037 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6038 {
6039 	/* Return tail of array given the head */
6040 	return ptr + ptr->head.length + 1;
6041 }
6042 
6043 static void
6044 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6045 			   int len)
6046 {
6047 	struct trace_eval_map **stop;
6048 	struct trace_eval_map **map;
6049 	union trace_eval_map_item *map_array;
6050 	union trace_eval_map_item *ptr;
6051 
6052 	stop = start + len;
6053 
6054 	/*
6055 	 * The trace_eval_maps contains the map plus a head and tail item,
6056 	 * where the head holds the module and length of array, and the
6057 	 * tail holds a pointer to the next list.
6058 	 */
6059 	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6060 	if (!map_array) {
6061 		pr_warn("Unable to allocate trace eval mapping\n");
6062 		return;
6063 	}
6064 
6065 	mutex_lock(&trace_eval_mutex);
6066 
6067 	if (!trace_eval_maps)
6068 		trace_eval_maps = map_array;
6069 	else {
6070 		ptr = trace_eval_maps;
6071 		for (;;) {
6072 			ptr = trace_eval_jmp_to_tail(ptr);
6073 			if (!ptr->tail.next)
6074 				break;
6075 			ptr = ptr->tail.next;
6076 
6077 		}
6078 		ptr->tail.next = map_array;
6079 	}
6080 	map_array->head.mod = mod;
6081 	map_array->head.length = len;
6082 	map_array++;
6083 
6084 	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6085 		map_array->map = **map;
6086 		map_array++;
6087 	}
6088 	memset(map_array, 0, sizeof(*map_array));
6089 
6090 	mutex_unlock(&trace_eval_mutex);
6091 }
6092 
6093 static void trace_create_eval_file(struct dentry *d_tracer)
6094 {
6095 	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6096 			  NULL, &tracing_eval_map_fops);
6097 }
6098 
6099 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6100 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6101 static inline void trace_insert_eval_map_file(struct module *mod,
6102 			      struct trace_eval_map **start, int len) { }
6103 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6104 
6105 static void trace_insert_eval_map(struct module *mod,
6106 				  struct trace_eval_map **start, int len)
6107 {
6108 	struct trace_eval_map **map;
6109 
6110 	if (len <= 0)
6111 		return;
6112 
6113 	map = start;
6114 
6115 	trace_event_eval_update(map, len);
6116 
6117 	trace_insert_eval_map_file(mod, start, len);
6118 }
6119 
6120 static ssize_t
6121 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6122 		       size_t cnt, loff_t *ppos)
6123 {
6124 	struct trace_array *tr = filp->private_data;
6125 	char buf[MAX_TRACER_SIZE+2];
6126 	int r;
6127 
6128 	mutex_lock(&trace_types_lock);
6129 	r = sprintf(buf, "%s\n", tr->current_trace->name);
6130 	mutex_unlock(&trace_types_lock);
6131 
6132 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6133 }
6134 
6135 int tracer_init(struct tracer *t, struct trace_array *tr)
6136 {
6137 	tracing_reset_online_cpus(&tr->array_buffer);
6138 	return t->init(tr);
6139 }
6140 
6141 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6142 {
6143 	int cpu;
6144 
6145 	for_each_tracing_cpu(cpu)
6146 		per_cpu_ptr(buf->data, cpu)->entries = val;
6147 }
6148 
6149 #ifdef CONFIG_TRACER_MAX_TRACE
6150 /* resize @trace_buf's buffer to the size of @size_buf's entries */
6151 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6152 					struct array_buffer *size_buf, int cpu_id)
6153 {
6154 	int cpu, ret = 0;
6155 
6156 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
6157 		for_each_tracing_cpu(cpu) {
6158 			ret = ring_buffer_resize(trace_buf->buffer,
6159 				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6160 			if (ret < 0)
6161 				break;
6162 			per_cpu_ptr(trace_buf->data, cpu)->entries =
6163 				per_cpu_ptr(size_buf->data, cpu)->entries;
6164 		}
6165 	} else {
6166 		ret = ring_buffer_resize(trace_buf->buffer,
6167 				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6168 		if (ret == 0)
6169 			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6170 				per_cpu_ptr(size_buf->data, cpu_id)->entries;
6171 	}
6172 
6173 	return ret;
6174 }
6175 #endif /* CONFIG_TRACER_MAX_TRACE */
6176 
6177 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6178 					unsigned long size, int cpu)
6179 {
6180 	int ret;
6181 
6182 	/*
6183 	 * If the kernel or the user changes the size of the ring buffer,
6184 	 * we use the size that was given, and we can forget about
6185 	 * expanding it later.
6186 	 */
6187 	ring_buffer_expanded = true;
6188 
6189 	/* May be called before buffers are initialized */
6190 	if (!tr->array_buffer.buffer)
6191 		return 0;
6192 
6193 	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6194 	if (ret < 0)
6195 		return ret;
6196 
6197 #ifdef CONFIG_TRACER_MAX_TRACE
6198 	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6199 	    !tr->current_trace->use_max_tr)
6200 		goto out;
6201 
6202 	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6203 	if (ret < 0) {
6204 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
6205 						     &tr->array_buffer, cpu);
6206 		if (r < 0) {
6207 			/*
6208 			 * AARGH! We are left with a differently
6209 			 * sized max buffer!!!!
6210 			 * The max buffer is our "snapshot" buffer.
6211 			 * When a tracer needs a snapshot (one of the
6212 			 * latency tracers), it swaps the max buffer
6213 			 * with the saved snapshot. We succeeded in
6214 			 * updating the size of the main buffer, but
6215 			 * failed to update the size of the max buffer.
6216 			 * Then, when we tried to reset the main buffer
6217 			 * back to its original size, that failed too.
6218 			 * This is very unlikely to happen, but if it
6219 			 * does, warn and kill all tracing.
6220 			 */
6221 			WARN_ON(1);
6222 			tracing_disabled = 1;
6223 		}
6224 		return ret;
6225 	}
6226 
6227 	if (cpu == RING_BUFFER_ALL_CPUS)
6228 		set_buffer_entries(&tr->max_buffer, size);
6229 	else
6230 		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6231 
6232  out:
6233 #endif /* CONFIG_TRACER_MAX_TRACE */
6234 
6235 	if (cpu == RING_BUFFER_ALL_CPUS)
6236 		set_buffer_entries(&tr->array_buffer, size);
6237 	else
6238 		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6239 
6240 	return ret;
6241 }
6242 
6243 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6244 				  unsigned long size, int cpu_id)
6245 {
6246 	int ret;
6247 
6248 	mutex_lock(&trace_types_lock);
6249 
6250 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
6251 		/* make sure this cpu is enabled in the mask */
6252 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6253 			ret = -EINVAL;
6254 			goto out;
6255 		}
6256 	}
6257 
6258 	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6259 	if (ret < 0)
6260 		ret = -ENOMEM;
6261 
6262 out:
6263 	mutex_unlock(&trace_types_lock);
6264 
6265 	return ret;
6266 }
6267 
6268 
6269 /**
6270  * tracing_update_buffers - used by tracing facility to expand ring buffers
6271  *
6272  * To save memory on systems where tracing is configured in but never
6273  * used, the ring buffers start out at a minimum size. Once a user
6274  * starts to use the tracing facility, they need to grow to their
6275  * default size.
6276  *
6277  * This function is to be called when a tracer is about to be used.
6278  */
6279 int tracing_update_buffers(void)
6280 {
6281 	int ret = 0;
6282 
6283 	mutex_lock(&trace_types_lock);
6284 	if (!ring_buffer_expanded)
6285 		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6286 						RING_BUFFER_ALL_CPUS);
6287 	mutex_unlock(&trace_types_lock);
6288 
6289 	return ret;
6290 }
6291 
6292 struct trace_option_dentry;
6293 
6294 static void
6295 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6296 
6297 /*
6298  * Used to clear out the tracer before deletion of an instance.
6299  * Must have trace_types_lock held.
6300  */
6301 static void tracing_set_nop(struct trace_array *tr)
6302 {
6303 	if (tr->current_trace == &nop_trace)
6304 		return;
6305 
6306 	tr->current_trace->enabled--;
6307 
6308 	if (tr->current_trace->reset)
6309 		tr->current_trace->reset(tr);
6310 
6311 	tr->current_trace = &nop_trace;
6312 }
6313 
6314 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6315 {
6316 	/* Only enable if the directory has been created already. */
6317 	if (!tr->dir)
6318 		return;
6319 
6320 	create_trace_option_files(tr, t);
6321 }
6322 
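/*
 * Switch @tr to the tracer named @buf: expand the ring buffer if it is
 * still at its boot-time minimum, look the name up in trace_types,
 * tear down the current tracer (freeing the snapshot buffer if the new
 * tracer does not need it) and initialize the new one. This is what a
 * write to the "current_tracer" file ends up calling, e.g. (assuming
 * tracefs is mounted at /sys/kernel/tracing and the function tracer is
 * configured in):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 */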
6323 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6324 {
6325 	struct tracer *t;
6326 #ifdef CONFIG_TRACER_MAX_TRACE
6327 	bool had_max_tr;
6328 #endif
6329 	int ret = 0;
6330 
6331 	mutex_lock(&trace_types_lock);
6332 
6333 	if (!ring_buffer_expanded) {
6334 		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6335 						RING_BUFFER_ALL_CPUS);
6336 		if (ret < 0)
6337 			goto out;
6338 		ret = 0;
6339 	}
6340 
6341 	for (t = trace_types; t; t = t->next) {
6342 		if (strcmp(t->name, buf) == 0)
6343 			break;
6344 	}
6345 	if (!t) {
6346 		ret = -EINVAL;
6347 		goto out;
6348 	}
6349 	if (t == tr->current_trace)
6350 		goto out;
6351 
6352 #ifdef CONFIG_TRACER_SNAPSHOT
6353 	if (t->use_max_tr) {
6354 		arch_spin_lock(&tr->max_lock);
6355 		if (tr->cond_snapshot)
6356 			ret = -EBUSY;
6357 		arch_spin_unlock(&tr->max_lock);
6358 		if (ret)
6359 			goto out;
6360 	}
6361 #endif
6362 	/* Some tracers won't work on kernel command line */
6363 	if (system_state < SYSTEM_RUNNING && t->noboot) {
6364 		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6365 			t->name);
6366 		goto out;
6367 	}
6368 
6369 	/* Some tracers are only allowed for the top level buffer */
6370 	if (!trace_ok_for_array(t, tr)) {
6371 		ret = -EINVAL;
6372 		goto out;
6373 	}
6374 
6375 	/* If trace pipe files are being read, we can't change the tracer */
6376 	if (tr->trace_ref) {
6377 		ret = -EBUSY;
6378 		goto out;
6379 	}
6380 
6381 	trace_branch_disable();
6382 
6383 	tr->current_trace->enabled--;
6384 
6385 	if (tr->current_trace->reset)
6386 		tr->current_trace->reset(tr);
6387 
6388 	/* Current trace needs to be nop_trace before synchronize_rcu */
6389 	tr->current_trace = &nop_trace;
6390 
6391 #ifdef CONFIG_TRACER_MAX_TRACE
6392 	had_max_tr = tr->allocated_snapshot;
6393 
6394 	if (had_max_tr && !t->use_max_tr) {
6395 		/*
6396 		 * We need to make sure that the update_max_tr sees that
6397 		 * current_trace changed to nop_trace to keep it from
6398 		 * swapping the buffers after we resize it.
6399 		 * update_max_tr() is called with interrupts disabled,
6400 		 * so a synchronize_rcu() is sufficient.
6401 		 */
6402 		synchronize_rcu();
6403 		free_snapshot(tr);
6404 	}
6405 #endif
6406 
6407 #ifdef CONFIG_TRACER_MAX_TRACE
6408 	if (t->use_max_tr && !had_max_tr) {
6409 		ret = tracing_alloc_snapshot_instance(tr);
6410 		if (ret < 0)
6411 			goto out;
6412 	}
6413 #endif
6414 
6415 	if (t->init) {
6416 		ret = tracer_init(t, tr);
6417 		if (ret)
6418 			goto out;
6419 	}
6420 
6421 	tr->current_trace = t;
6422 	tr->current_trace->enabled++;
6423 	trace_branch_enable(tr);
6424  out:
6425 	mutex_unlock(&trace_types_lock);
6426 
6427 	return ret;
6428 }
6429 
6430 static ssize_t
6431 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6432 			size_t cnt, loff_t *ppos)
6433 {
6434 	struct trace_array *tr = filp->private_data;
6435 	char buf[MAX_TRACER_SIZE+1];
6436 	int i;
6437 	size_t ret;
6438 	int err;
6439 
6440 	ret = cnt;
6441 
6442 	if (cnt > MAX_TRACER_SIZE)
6443 		cnt = MAX_TRACER_SIZE;
6444 
6445 	if (copy_from_user(buf, ubuf, cnt))
6446 		return -EFAULT;
6447 
6448 	buf[cnt] = 0;
6449 
6450 	/* strip ending whitespace. */
6451 	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6452 		buf[i] = 0;
6453 
6454 	err = tracing_set_tracer(tr, buf);
6455 	if (err)
6456 		return err;
6457 
6458 	*ppos += ret;
6459 
6460 	return ret;
6461 }
6462 
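/*
 * Helpers for files that store nanoseconds internally but talk to
 * userspace in microseconds ("tracing_thresh" and, when available,
 * "tracing_max_latency"): reads convert the stored value with
 * nsecs_to_usecs() (printing -1 for "unset"), writes multiply the
 * user-supplied microsecond value by 1000 before storing it.
 */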
6463 static ssize_t
6464 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6465 		   size_t cnt, loff_t *ppos)
6466 {
6467 	char buf[64];
6468 	int r;
6469 
6470 	r = snprintf(buf, sizeof(buf), "%ld\n",
6471 		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6472 	if (r > sizeof(buf))
6473 		r = sizeof(buf);
6474 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6475 }
6476 
6477 static ssize_t
6478 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6479 		    size_t cnt, loff_t *ppos)
6480 {
6481 	unsigned long val;
6482 	int ret;
6483 
6484 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6485 	if (ret)
6486 		return ret;
6487 
6488 	*ptr = val * 1000;
6489 
6490 	return cnt;
6491 }
6492 
6493 static ssize_t
6494 tracing_thresh_read(struct file *filp, char __user *ubuf,
6495 		    size_t cnt, loff_t *ppos)
6496 {
6497 	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6498 }
6499 
6500 static ssize_t
6501 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6502 		     size_t cnt, loff_t *ppos)
6503 {
6504 	struct trace_array *tr = filp->private_data;
6505 	int ret;
6506 
6507 	mutex_lock(&trace_types_lock);
6508 	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6509 	if (ret < 0)
6510 		goto out;
6511 
6512 	if (tr->current_trace->update_thresh) {
6513 		ret = tr->current_trace->update_thresh(tr);
6514 		if (ret < 0)
6515 			goto out;
6516 	}
6517 
6518 	ret = cnt;
6519 out:
6520 	mutex_unlock(&trace_types_lock);
6521 
6522 	return ret;
6523 }
6524 
6525 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6526 
6527 static ssize_t
6528 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6529 		     size_t cnt, loff_t *ppos)
6530 {
6531 	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6532 }
6533 
6534 static ssize_t
6535 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6536 		      size_t cnt, loff_t *ppos)
6537 {
6538 	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6539 }
6540 
6541 #endif
6542 
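/*
 * "trace_pipe" is the consuming reader of the ring buffer: events
 * returned by a read are removed from the buffer, and reads block
 * until data is available unless the file was opened O_NONBLOCK.
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/trace_pipe
 */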
6543 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6544 {
6545 	struct trace_array *tr = inode->i_private;
6546 	struct trace_iterator *iter;
6547 	int ret;
6548 
6549 	ret = tracing_check_open_get_tr(tr);
6550 	if (ret)
6551 		return ret;
6552 
6553 	mutex_lock(&trace_types_lock);
6554 
6555 	/* create a buffer to store the information to pass to userspace */
6556 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6557 	if (!iter) {
6558 		ret = -ENOMEM;
6559 		__trace_array_put(tr);
6560 		goto out;
6561 	}
6562 
6563 	trace_seq_init(&iter->seq);
6564 	iter->trace = tr->current_trace;
6565 
6566 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6567 		ret = -ENOMEM;
6568 		goto fail;
6569 	}
6570 
6571 	/* trace pipe does not show start of buffer */
6572 	cpumask_setall(iter->started);
6573 
6574 	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6575 		iter->iter_flags |= TRACE_FILE_LAT_FMT;
6576 
6577 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6578 	if (trace_clocks[tr->clock_id].in_ns)
6579 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6580 
6581 	iter->tr = tr;
6582 	iter->array_buffer = &tr->array_buffer;
6583 	iter->cpu_file = tracing_get_cpu(inode);
6584 	mutex_init(&iter->mutex);
6585 	filp->private_data = iter;
6586 
6587 	if (iter->trace->pipe_open)
6588 		iter->trace->pipe_open(iter);
6589 
6590 	nonseekable_open(inode, filp);
6591 
6592 	tr->trace_ref++;
6593 out:
6594 	mutex_unlock(&trace_types_lock);
6595 	return ret;
6596 
6597 fail:
6598 	kfree(iter);
6599 	__trace_array_put(tr);
6600 	mutex_unlock(&trace_types_lock);
6601 	return ret;
6602 }
6603 
6604 static int tracing_release_pipe(struct inode *inode, struct file *file)
6605 {
6606 	struct trace_iterator *iter = file->private_data;
6607 	struct trace_array *tr = inode->i_private;
6608 
6609 	mutex_lock(&trace_types_lock);
6610 
6611 	tr->trace_ref--;
6612 
6613 	if (iter->trace->pipe_close)
6614 		iter->trace->pipe_close(iter);
6615 
6616 	mutex_unlock(&trace_types_lock);
6617 
6618 	free_cpumask_var(iter->started);
6619 	mutex_destroy(&iter->mutex);
6620 	kfree(iter);
6621 
6622 	trace_array_put(tr);
6623 
6624 	return 0;
6625 }
6626 
6627 static __poll_t
6628 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6629 {
6630 	struct trace_array *tr = iter->tr;
6631 
6632 	/* Iterators are static, they should be filled or empty */
6633 	if (trace_buffer_iter(iter, iter->cpu_file))
6634 		return EPOLLIN | EPOLLRDNORM;
6635 
6636 	if (tr->trace_flags & TRACE_ITER_BLOCK)
6637 		/*
6638 		 * Always select as readable when in blocking mode
6639 		 */
6640 		return EPOLLIN | EPOLLRDNORM;
6641 	else
6642 		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6643 					     filp, poll_table);
6644 }
6645 
6646 static __poll_t
6647 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6648 {
6649 	struct trace_iterator *iter = filp->private_data;
6650 
6651 	return trace_poll(iter, filp, poll_table);
6652 }
6653 
6654 /* Must be called with iter->mutex held. */
6655 static int tracing_wait_pipe(struct file *filp)
6656 {
6657 	struct trace_iterator *iter = filp->private_data;
6658 	int ret;
6659 
6660 	while (trace_empty(iter)) {
6661 
6662 		if ((filp->f_flags & O_NONBLOCK)) {
6663 			return -EAGAIN;
6664 		}
6665 
6666 		/*
6667 		 * We block until there is something to read, and we keep
6668 		 * blocking while tracing is disabled if we have never
6669 		 * read anything. This allows a user to cat this file, and
6670 		 * then enable tracing. But after we have read something,
6671 		 * we give an EOF when tracing is again disabled.
6672 		 *
6673 		 * iter->pos will be 0 if we haven't read anything.
6674 		 */
6675 		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6676 			break;
6677 
6678 		mutex_unlock(&iter->mutex);
6679 
6680 		ret = wait_on_pipe(iter, 0);
6681 
6682 		mutex_lock(&iter->mutex);
6683 
6684 		if (ret)
6685 			return ret;
6686 	}
6687 
6688 	return 1;
6689 }
6690 
6691 /*
6692  * Consumer reader.
6693  */
6694 static ssize_t
6695 tracing_read_pipe(struct file *filp, char __user *ubuf,
6696 		  size_t cnt, loff_t *ppos)
6697 {
6698 	struct trace_iterator *iter = filp->private_data;
6699 	ssize_t sret;
6700 
6701 	/*
6702 	 * Avoid more than one consumer on a single file descriptor.
6703 	 * This is just a matter of trace coherency: the ring buffer itself
6704 	 * is protected.
6705 	 */
6706 	mutex_lock(&iter->mutex);
6707 
6708 	/* return any leftover data */
6709 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6710 	if (sret != -EBUSY)
6711 		goto out;
6712 
6713 	trace_seq_init(&iter->seq);
6714 
6715 	if (iter->trace->read) {
6716 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6717 		if (sret)
6718 			goto out;
6719 	}
6720 
6721 waitagain:
6722 	sret = tracing_wait_pipe(filp);
6723 	if (sret <= 0)
6724 		goto out;
6725 
6726 	/* stop when tracing is finished */
6727 	if (trace_empty(iter)) {
6728 		sret = 0;
6729 		goto out;
6730 	}
6731 
6732 	if (cnt >= PAGE_SIZE)
6733 		cnt = PAGE_SIZE - 1;
6734 
6735 	/* reset all but tr, trace, and overruns */
6736 	trace_iterator_reset(iter);
6737 	cpumask_clear(iter->started);
6738 	trace_seq_init(&iter->seq);
6739 
6740 	trace_event_read_lock();
6741 	trace_access_lock(iter->cpu_file);
6742 	while (trace_find_next_entry_inc(iter) != NULL) {
6743 		enum print_line_t ret;
6744 		int save_len = iter->seq.seq.len;
6745 
6746 		ret = print_trace_line(iter);
6747 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6748 			/* don't print partial lines */
6749 			iter->seq.seq.len = save_len;
6750 			break;
6751 		}
6752 		if (ret != TRACE_TYPE_NO_CONSUME)
6753 			trace_consume(iter);
6754 
6755 		if (trace_seq_used(&iter->seq) >= cnt)
6756 			break;
6757 
6758 		/*
6759 		 * Setting the full flag means we reached the trace_seq buffer
6760 		 * size and should have left via the partial-line condition above.
6761 		 * One of the trace_seq_* functions is not used properly.
6762 		 */
6763 		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6764 			  iter->ent->type);
6765 	}
6766 	trace_access_unlock(iter->cpu_file);
6767 	trace_event_read_unlock();
6768 
6769 	/* Now copy what we have to the user */
6770 	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6771 	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6772 		trace_seq_init(&iter->seq);
6773 
6774 	/*
6775 	 * If there was nothing to send to user, in spite of consuming trace
6776 	 * entries, go back to wait for more entries.
6777 	 */
6778 	if (sret == -EBUSY)
6779 		goto waitagain;
6780 
6781 out:
6782 	mutex_unlock(&iter->mutex);
6783 
6784 	return sret;
6785 }
6786 
6787 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6788 				     unsigned int idx)
6789 {
6790 	__free_page(spd->pages[idx]);
6791 }
6792 
6793 static size_t
6794 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6795 {
6796 	size_t count;
6797 	int save_len;
6798 	int ret;
6799 
6800 	/* Seq buffer is page-sized, exactly what we need. */
6801 	for (;;) {
6802 		save_len = iter->seq.seq.len;
6803 		ret = print_trace_line(iter);
6804 
6805 		if (trace_seq_has_overflowed(&iter->seq)) {
6806 			iter->seq.seq.len = save_len;
6807 			break;
6808 		}
6809 
6810 		/*
6811 		 * This should not be hit, because it should only
6812 		 * be set if the iter->seq overflowed. But check it
6813 		 * anyway to be safe.
6814 		 */
6815 		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6816 			iter->seq.seq.len = save_len;
6817 			break;
6818 		}
6819 
6820 		count = trace_seq_used(&iter->seq) - save_len;
6821 		if (rem < count) {
6822 			rem = 0;
6823 			iter->seq.seq.len = save_len;
6824 			break;
6825 		}
6826 
6827 		if (ret != TRACE_TYPE_NO_CONSUME)
6828 			trace_consume(iter);
6829 		rem -= count;
6830 		if (!trace_find_next_entry_inc(iter))	{
6831 			rem = 0;
6832 			iter->ent = NULL;
6833 			break;
6834 		}
6835 	}
6836 
6837 	return rem;
6838 }
6839 
6840 static ssize_t tracing_splice_read_pipe(struct file *filp,
6841 					loff_t *ppos,
6842 					struct pipe_inode_info *pipe,
6843 					size_t len,
6844 					unsigned int flags)
6845 {
6846 	struct page *pages_def[PIPE_DEF_BUFFERS];
6847 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6848 	struct trace_iterator *iter = filp->private_data;
6849 	struct splice_pipe_desc spd = {
6850 		.pages		= pages_def,
6851 		.partial	= partial_def,
6852 		.nr_pages	= 0, /* This gets updated below. */
6853 		.nr_pages_max	= PIPE_DEF_BUFFERS,
6854 		.ops		= &default_pipe_buf_ops,
6855 		.spd_release	= tracing_spd_release_pipe,
6856 	};
6857 	ssize_t ret;
6858 	size_t rem;
6859 	unsigned int i;
6860 
6861 	if (splice_grow_spd(pipe, &spd))
6862 		return -ENOMEM;
6863 
6864 	mutex_lock(&iter->mutex);
6865 
6866 	if (iter->trace->splice_read) {
6867 		ret = iter->trace->splice_read(iter, filp,
6868 					       ppos, pipe, len, flags);
6869 		if (ret)
6870 			goto out_err;
6871 	}
6872 
6873 	ret = tracing_wait_pipe(filp);
6874 	if (ret <= 0)
6875 		goto out_err;
6876 
6877 	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6878 		ret = -EFAULT;
6879 		goto out_err;
6880 	}
6881 
6882 	trace_event_read_lock();
6883 	trace_access_lock(iter->cpu_file);
6884 
6885 	/* Fill as many pages as possible. */
6886 	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6887 		spd.pages[i] = alloc_page(GFP_KERNEL);
6888 		if (!spd.pages[i])
6889 			break;
6890 
6891 		rem = tracing_fill_pipe_page(rem, iter);
6892 
6893 		/* Copy the data into the page, so we can start over. */
6894 		ret = trace_seq_to_buffer(&iter->seq,
6895 					  page_address(spd.pages[i]),
6896 					  trace_seq_used(&iter->seq));
6897 		if (ret < 0) {
6898 			__free_page(spd.pages[i]);
6899 			break;
6900 		}
6901 		spd.partial[i].offset = 0;
6902 		spd.partial[i].len = trace_seq_used(&iter->seq);
6903 
6904 		trace_seq_init(&iter->seq);
6905 	}
6906 
6907 	trace_access_unlock(iter->cpu_file);
6908 	trace_event_read_unlock();
6909 	mutex_unlock(&iter->mutex);
6910 
6911 	spd.nr_pages = i;
6912 
6913 	if (i)
6914 		ret = splice_to_pipe(pipe, &spd);
6915 	else
6916 		ret = 0;
6917 out:
6918 	splice_shrink_spd(&spd);
6919 	return ret;
6920 
6921 out_err:
6922 	mutex_unlock(&iter->mutex);
6923 	goto out;
6924 }
6925 
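/*
 * "buffer_size_kb" reports the per-CPU ring buffer size in KB. For the
 * top-level (all CPUs) file, "X" is shown when the per-CPU sizes
 * differ, and "(expanded: N)" is appended while the buffer is still at
 * its boot-time minimum. Writing a value resizes the buffer, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 */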
6926 static ssize_t
6927 tracing_entries_read(struct file *filp, char __user *ubuf,
6928 		     size_t cnt, loff_t *ppos)
6929 {
6930 	struct inode *inode = file_inode(filp);
6931 	struct trace_array *tr = inode->i_private;
6932 	int cpu = tracing_get_cpu(inode);
6933 	char buf[64];
6934 	int r = 0;
6935 	ssize_t ret;
6936 
6937 	mutex_lock(&trace_types_lock);
6938 
6939 	if (cpu == RING_BUFFER_ALL_CPUS) {
6940 		int cpu, buf_size_same;
6941 		unsigned long size;
6942 
6943 		size = 0;
6944 		buf_size_same = 1;
6945 		/* check if all cpu sizes are same */
6946 		for_each_tracing_cpu(cpu) {
6947 			/* fill in the size from first enabled cpu */
6948 			if (size == 0)
6949 				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6950 			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6951 				buf_size_same = 0;
6952 				break;
6953 			}
6954 		}
6955 
6956 		if (buf_size_same) {
6957 			if (!ring_buffer_expanded)
6958 				r = sprintf(buf, "%lu (expanded: %lu)\n",
6959 					    size >> 10,
6960 					    trace_buf_size >> 10);
6961 			else
6962 				r = sprintf(buf, "%lu\n", size >> 10);
6963 		} else
6964 			r = sprintf(buf, "X\n");
6965 	} else
6966 		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6967 
6968 	mutex_unlock(&trace_types_lock);
6969 
6970 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6971 	return ret;
6972 }
6973 
6974 static ssize_t
6975 tracing_entries_write(struct file *filp, const char __user *ubuf,
6976 		      size_t cnt, loff_t *ppos)
6977 {
6978 	struct inode *inode = file_inode(filp);
6979 	struct trace_array *tr = inode->i_private;
6980 	unsigned long val;
6981 	int ret;
6982 
6983 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6984 	if (ret)
6985 		return ret;
6986 
6987 	/* must have at least 1 entry */
6988 	if (!val)
6989 		return -EINVAL;
6990 
6991 	/* value is in KB */
6992 	val <<= 10;
6993 	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6994 	if (ret < 0)
6995 		return ret;
6996 
6997 	*ppos += cnt;
6998 
6999 	return cnt;
7000 }
7001 
7002 static ssize_t
7003 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7004 				size_t cnt, loff_t *ppos)
7005 {
7006 	struct trace_array *tr = filp->private_data;
7007 	char buf[64];
7008 	int r, cpu;
7009 	unsigned long size = 0, expanded_size = 0;
7010 
7011 	mutex_lock(&trace_types_lock);
7012 	for_each_tracing_cpu(cpu) {
7013 		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7014 		if (!ring_buffer_expanded)
7015 			expanded_size += trace_buf_size >> 10;
7016 	}
7017 	if (ring_buffer_expanded)
7018 		r = sprintf(buf, "%lu\n", size);
7019 	else
7020 		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7021 	mutex_unlock(&trace_types_lock);
7022 
7023 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7024 }
7025 
7026 static ssize_t
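/*
 * "free_buffer": the written bytes are ignored; when the file is
 * released the ring buffer is resized to zero, and if the
 * TRACE_ITER_STOP_ON_FREE option (the "disable_on_free" trace option)
 * is set, tracing is turned off first.
 */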
7027 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7028 			  size_t cnt, loff_t *ppos)
7029 {
7030 	/*
7031 	 * There is no need to read what the user has written; this function
7032 	 * just makes sure that there is no error when "echo" is used.
7033 	 */
7034 
7035 	*ppos += cnt;
7036 
7037 	return cnt;
7038 }
7039 
7040 static int
7041 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7042 {
7043 	struct trace_array *tr = inode->i_private;
7044 
7045 	/* disable tracing ? */
7046 	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7047 		tracer_tracing_off(tr);
7048 	/* resize the ring buffer to 0 */
7049 	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7050 
7051 	trace_array_put(tr);
7052 
7053 	return 0;
7054 }
7055 
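/*
 * Write handler for the "trace_marker" file: the user string is copied
 * into a TRACE_PRINT event (truncated to TRACE_BUF_SIZE, with a
 * newline appended if missing), so userspace can annotate a trace,
 * e.g. (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 */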
7056 static ssize_t
7057 tracing_mark_write(struct file *filp, const char __user *ubuf,
7058 					size_t cnt, loff_t *fpos)
7059 {
7060 	struct trace_array *tr = filp->private_data;
7061 	struct ring_buffer_event *event;
7062 	enum event_trigger_type tt = ETT_NONE;
7063 	struct trace_buffer *buffer;
7064 	struct print_entry *entry;
7065 	ssize_t written;
7066 	int size;
7067 	int len;
7068 
7069 /* Used in tracing_mark_raw_write() as well */
7070 #define FAULTED_STR "<faulted>"
7071 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7072 
7073 	if (tracing_disabled)
7074 		return -EINVAL;
7075 
7076 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7077 		return -EINVAL;
7078 
7079 	if (cnt > TRACE_BUF_SIZE)
7080 		cnt = TRACE_BUF_SIZE;
7081 
7082 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7083 
7084 	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7085 
7086 	/* If less than "<faulted>", then make sure we can still add that */
7087 	if (cnt < FAULTED_SIZE)
7088 		size += FAULTED_SIZE - cnt;
7089 
7090 	buffer = tr->array_buffer.buffer;
7091 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7092 					    tracing_gen_ctx());
7093 	if (unlikely(!event))
7094 		/* Ring buffer disabled, return as if not open for write */
7095 		return -EBADF;
7096 
7097 	entry = ring_buffer_event_data(event);
7098 	entry->ip = _THIS_IP_;
7099 
7100 	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7101 	if (len) {
7102 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7103 		cnt = FAULTED_SIZE;
7104 		written = -EFAULT;
7105 	} else
7106 		written = cnt;
7107 
7108 	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7109 		/* do not add \n before testing triggers, but add \0 */
7110 		entry->buf[cnt] = '\0';
7111 		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7112 	}
7113 
7114 	if (entry->buf[cnt - 1] != '\n') {
7115 		entry->buf[cnt] = '\n';
7116 		entry->buf[cnt + 1] = '\0';
7117 	} else
7118 		entry->buf[cnt] = '\0';
7119 
7120 	if (static_branch_unlikely(&trace_marker_exports_enabled))
7121 		ftrace_exports(event, TRACE_EXPORT_MARKER);
7122 	__buffer_unlock_commit(buffer, event);
7123 
7124 	if (tt)
7125 		event_triggers_post_call(tr->trace_marker_file, tt);
7126 
7127 	return written;
7128 }
7129 
7130 /* Limit it for now to 3K (including tag) */
7131 #define RAW_DATA_MAX_SIZE (1024*3)
7132 
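/*
 * Write handler for the "trace_marker_raw" file: the payload must
 * start with a tag id (an unsigned int, stored in entry->id) followed
 * by raw binary data, and is limited to RAW_DATA_MAX_SIZE. It is
 * intended for tools that log binary records rather than text.
 */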
7133 static ssize_t
7134 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7135 					size_t cnt, loff_t *fpos)
7136 {
7137 	struct trace_array *tr = filp->private_data;
7138 	struct ring_buffer_event *event;
7139 	struct trace_buffer *buffer;
7140 	struct raw_data_entry *entry;
7141 	ssize_t written;
7142 	int size;
7143 	int len;
7144 
7145 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7146 
7147 	if (tracing_disabled)
7148 		return -EINVAL;
7149 
7150 	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7151 		return -EINVAL;
7152 
7153 	/* The marker must at least have a tag id */
7154 	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7155 		return -EINVAL;
7156 
7157 	if (cnt > TRACE_BUF_SIZE)
7158 		cnt = TRACE_BUF_SIZE;
7159 
7160 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7161 
7162 	size = sizeof(*entry) + cnt;
7163 	if (cnt < FAULT_SIZE_ID)
7164 		size += FAULT_SIZE_ID - cnt;
7165 
7166 	buffer = tr->array_buffer.buffer;
7167 	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7168 					    tracing_gen_ctx());
7169 	if (!event)
7170 		/* Ring buffer disabled, return as if not open for write */
7171 		return -EBADF;
7172 
7173 	entry = ring_buffer_event_data(event);
7174 
7175 	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7176 	if (len) {
7177 		entry->id = -1;
7178 		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7179 		written = -EFAULT;
7180 	} else
7181 		written = cnt;
7182 
7183 	__buffer_unlock_commit(buffer, event);
7184 
7185 	return written;
7186 }
7187 
7188 static int tracing_clock_show(struct seq_file *m, void *v)
7189 {
7190 	struct trace_array *tr = m->private;
7191 	int i;
7192 
7193 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7194 		seq_printf(m,
7195 			"%s%s%s%s", i ? " " : "",
7196 			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7197 			i == tr->clock_id ? "]" : "");
7198 	seq_putc(m, '\n');
7199 
7200 	return 0;
7201 }
7202 
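/*
 * Select the trace clock named @clockstr for @tr and reset the
 * buffers, since timestamps from different clocks are not comparable.
 * From userspace this is driven by the "trace_clock" file, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   cat /sys/kernel/tracing/trace_clock    # current clock in brackets
 *   echo mono > /sys/kernel/tracing/trace_clock
 */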
7203 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7204 {
7205 	int i;
7206 
7207 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7208 		if (strcmp(trace_clocks[i].name, clockstr) == 0)
7209 			break;
7210 	}
7211 	if (i == ARRAY_SIZE(trace_clocks))
7212 		return -EINVAL;
7213 
7214 	mutex_lock(&trace_types_lock);
7215 
7216 	tr->clock_id = i;
7217 
7218 	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7219 
7220 	/*
7221 	 * New clock may not be consistent with the previous clock.
7222 	 * Reset the buffer so that it doesn't have incomparable timestamps.
7223 	 */
7224 	tracing_reset_online_cpus(&tr->array_buffer);
7225 
7226 #ifdef CONFIG_TRACER_MAX_TRACE
7227 	if (tr->max_buffer.buffer)
7228 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7229 	tracing_reset_online_cpus(&tr->max_buffer);
7230 #endif
7231 
7232 	mutex_unlock(&trace_types_lock);
7233 
7234 	return 0;
7235 }
7236 
7237 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7238 				   size_t cnt, loff_t *fpos)
7239 {
7240 	struct seq_file *m = filp->private_data;
7241 	struct trace_array *tr = m->private;
7242 	char buf[64];
7243 	const char *clockstr;
7244 	int ret;
7245 
7246 	if (cnt >= sizeof(buf))
7247 		return -EINVAL;
7248 
7249 	if (copy_from_user(buf, ubuf, cnt))
7250 		return -EFAULT;
7251 
7252 	buf[cnt] = 0;
7253 
7254 	clockstr = strstrip(buf);
7255 
7256 	ret = tracing_set_clock(tr, clockstr);
7257 	if (ret)
7258 		return ret;
7259 
7260 	*fpos += cnt;
7261 
7262 	return cnt;
7263 }
7264 
7265 static int tracing_clock_open(struct inode *inode, struct file *file)
7266 {
7267 	struct trace_array *tr = inode->i_private;
7268 	int ret;
7269 
7270 	ret = tracing_check_open_get_tr(tr);
7271 	if (ret)
7272 		return ret;
7273 
7274 	ret = single_open(file, tracing_clock_show, inode->i_private);
7275 	if (ret < 0)
7276 		trace_array_put(tr);
7277 
7278 	return ret;
7279 }
7280 
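/*
 * "timestamp_mode" reports whether the ring buffer records delta or
 * absolute timestamps; the mode currently in use is shown in brackets,
 * e.g. "[delta] absolute".
 */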
7281 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7282 {
7283 	struct trace_array *tr = m->private;
7284 
7285 	mutex_lock(&trace_types_lock);
7286 
7287 	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7288 		seq_puts(m, "delta [absolute]\n");
7289 	else
7290 		seq_puts(m, "[delta] absolute\n");
7291 
7292 	mutex_unlock(&trace_types_lock);
7293 
7294 	return 0;
7295 }
7296 
7297 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7298 {
7299 	struct trace_array *tr = inode->i_private;
7300 	int ret;
7301 
7302 	ret = tracing_check_open_get_tr(tr);
7303 	if (ret)
7304 		return ret;
7305 
7306 	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7307 	if (ret < 0)
7308 		trace_array_put(tr);
7309 
7310 	return ret;
7311 }
7312 
7313 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7314 {
7315 	if (rbe == this_cpu_read(trace_buffered_event))
7316 		return ring_buffer_time_stamp(buffer);
7317 
7318 	return ring_buffer_event_time_stamp(buffer, rbe);
7319 }
7320 
7321 /*
7322  * Enable or disable use of the per-CPU trace_buffered_event when possible.
7323  */
7324 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7325 {
7326 	int ret = 0;
7327 
7328 	mutex_lock(&trace_types_lock);
7329 
7330 	if (set && tr->no_filter_buffering_ref++)
7331 		goto out;
7332 
7333 	if (!set) {
7334 		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7335 			ret = -EINVAL;
7336 			goto out;
7337 		}
7338 
7339 		--tr->no_filter_buffering_ref;
7340 	}
7341  out:
7342 	mutex_unlock(&trace_types_lock);
7343 
7344 	return ret;
7345 }
7346 
7347 struct ftrace_buffer_info {
7348 	struct trace_iterator	iter;
7349 	void			*spare;
7350 	unsigned int		spare_cpu;
7351 	unsigned int		read;
7352 };
7353 
7354 #ifdef CONFIG_TRACER_SNAPSHOT
7355 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7356 {
7357 	struct trace_array *tr = inode->i_private;
7358 	struct trace_iterator *iter;
7359 	struct seq_file *m;
7360 	int ret;
7361 
7362 	ret = tracing_check_open_get_tr(tr);
7363 	if (ret)
7364 		return ret;
7365 
7366 	if (file->f_mode & FMODE_READ) {
7367 		iter = __tracing_open(inode, file, true);
7368 		if (IS_ERR(iter))
7369 			ret = PTR_ERR(iter);
7370 	} else {
7371 		/* Writes still need the seq_file to hold the private data */
7372 		ret = -ENOMEM;
7373 		m = kzalloc(sizeof(*m), GFP_KERNEL);
7374 		if (!m)
7375 			goto out;
7376 		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7377 		if (!iter) {
7378 			kfree(m);
7379 			goto out;
7380 		}
7381 		ret = 0;
7382 
7383 		iter->tr = tr;
7384 		iter->array_buffer = &tr->max_buffer;
7385 		iter->cpu_file = tracing_get_cpu(inode);
7386 		m->private = iter;
7387 		file->private_data = m;
7388 	}
7389 out:
7390 	if (ret < 0)
7391 		trace_array_put(tr);
7392 
7393 	return ret;
7394 }
7395 
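/*
 * Values accepted by a write to the "snapshot" file:
 *   0 - free the snapshot buffer (only valid for the whole buffer,
 *       not a per-CPU file)
 *   1 - allocate the snapshot buffer if needed and swap it with the
 *       live buffer
 *   anything else - clear the snapshot buffer without freeing it
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/snapshot
 *   cat /sys/kernel/tracing/snapshot
 */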
7396 static ssize_t
7397 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7398 		       loff_t *ppos)
7399 {
7400 	struct seq_file *m = filp->private_data;
7401 	struct trace_iterator *iter = m->private;
7402 	struct trace_array *tr = iter->tr;
7403 	unsigned long val;
7404 	int ret;
7405 
7406 	ret = tracing_update_buffers();
7407 	if (ret < 0)
7408 		return ret;
7409 
7410 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7411 	if (ret)
7412 		return ret;
7413 
7414 	mutex_lock(&trace_types_lock);
7415 
7416 	if (tr->current_trace->use_max_tr) {
7417 		ret = -EBUSY;
7418 		goto out;
7419 	}
7420 
7421 	arch_spin_lock(&tr->max_lock);
7422 	if (tr->cond_snapshot)
7423 		ret = -EBUSY;
7424 	arch_spin_unlock(&tr->max_lock);
7425 	if (ret)
7426 		goto out;
7427 
7428 	switch (val) {
7429 	case 0:
7430 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7431 			ret = -EINVAL;
7432 			break;
7433 		}
7434 		if (tr->allocated_snapshot)
7435 			free_snapshot(tr);
7436 		break;
7437 	case 1:
7438 /* Only allow per-cpu swap if the ring buffer supports it */
7439 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7440 		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7441 			ret = -EINVAL;
7442 			break;
7443 		}
7444 #endif
7445 		if (tr->allocated_snapshot)
7446 			ret = resize_buffer_duplicate_size(&tr->max_buffer,
7447 					&tr->array_buffer, iter->cpu_file);
7448 		else
7449 			ret = tracing_alloc_snapshot_instance(tr);
7450 		if (ret < 0)
7451 			break;
7452 		local_irq_disable();
7453 		/* Now, we're going to swap */
7454 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7455 			update_max_tr(tr, current, smp_processor_id(), NULL);
7456 		else
7457 			update_max_tr_single(tr, current, iter->cpu_file);
7458 		local_irq_enable();
7459 		break;
7460 	default:
7461 		if (tr->allocated_snapshot) {
7462 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7463 				tracing_reset_online_cpus(&tr->max_buffer);
7464 			else
7465 				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7466 		}
7467 		break;
7468 	}
7469 
7470 	if (ret >= 0) {
7471 		*ppos += cnt;
7472 		ret = cnt;
7473 	}
7474 out:
7475 	mutex_unlock(&trace_types_lock);
7476 	return ret;
7477 }
7478 
7479 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7480 {
7481 	struct seq_file *m = file->private_data;
7482 	int ret;
7483 
7484 	ret = tracing_release(inode, file);
7485 
7486 	if (file->f_mode & FMODE_READ)
7487 		return ret;
7488 
7489 	/* If write only, the seq_file is just a stub */
7490 	if (m)
7491 		kfree(m->private);
7492 	kfree(m);
7493 
7494 	return 0;
7495 }
7496 
7497 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7498 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7499 				    size_t count, loff_t *ppos);
7500 static int tracing_buffers_release(struct inode *inode, struct file *file);
7501 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7502 		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7503 
7504 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7505 {
7506 	struct ftrace_buffer_info *info;
7507 	int ret;
7508 
7509 	/* The following checks for tracefs lockdown */
7510 	ret = tracing_buffers_open(inode, filp);
7511 	if (ret < 0)
7512 		return ret;
7513 
7514 	info = filp->private_data;
7515 
7516 	if (info->iter.trace->use_max_tr) {
7517 		tracing_buffers_release(inode, filp);
7518 		return -EBUSY;
7519 	}
7520 
7521 	info->iter.snapshot = true;
7522 	info->iter.array_buffer = &info->iter.tr->max_buffer;
7523 
7524 	return ret;
7525 }
7526 
7527 #endif /* CONFIG_TRACER_SNAPSHOT */
7528 
7529 
7530 static const struct file_operations tracing_thresh_fops = {
7531 	.open		= tracing_open_generic,
7532 	.read		= tracing_thresh_read,
7533 	.write		= tracing_thresh_write,
7534 	.llseek		= generic_file_llseek,
7535 };
7536 
7537 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7538 static const struct file_operations tracing_max_lat_fops = {
7539 	.open		= tracing_open_generic,
7540 	.read		= tracing_max_lat_read,
7541 	.write		= tracing_max_lat_write,
7542 	.llseek		= generic_file_llseek,
7543 };
7544 #endif
7545 
7546 static const struct file_operations set_tracer_fops = {
7547 	.open		= tracing_open_generic,
7548 	.read		= tracing_set_trace_read,
7549 	.write		= tracing_set_trace_write,
7550 	.llseek		= generic_file_llseek,
7551 };
7552 
7553 static const struct file_operations tracing_pipe_fops = {
7554 	.open		= tracing_open_pipe,
7555 	.poll		= tracing_poll_pipe,
7556 	.read		= tracing_read_pipe,
7557 	.splice_read	= tracing_splice_read_pipe,
7558 	.release	= tracing_release_pipe,
7559 	.llseek		= no_llseek,
7560 };
7561 
7562 static const struct file_operations tracing_entries_fops = {
7563 	.open		= tracing_open_generic_tr,
7564 	.read		= tracing_entries_read,
7565 	.write		= tracing_entries_write,
7566 	.llseek		= generic_file_llseek,
7567 	.release	= tracing_release_generic_tr,
7568 };
7569 
7570 static const struct file_operations tracing_total_entries_fops = {
7571 	.open		= tracing_open_generic_tr,
7572 	.read		= tracing_total_entries_read,
7573 	.llseek		= generic_file_llseek,
7574 	.release	= tracing_release_generic_tr,
7575 };
7576 
7577 static const struct file_operations tracing_free_buffer_fops = {
7578 	.open		= tracing_open_generic_tr,
7579 	.write		= tracing_free_buffer_write,
7580 	.release	= tracing_free_buffer_release,
7581 };
7582 
7583 static const struct file_operations tracing_mark_fops = {
7584 	.open		= tracing_mark_open,
7585 	.write		= tracing_mark_write,
7586 	.release	= tracing_release_generic_tr,
7587 };
7588 
7589 static const struct file_operations tracing_mark_raw_fops = {
7590 	.open		= tracing_mark_open,
7591 	.write		= tracing_mark_raw_write,
7592 	.release	= tracing_release_generic_tr,
7593 };
7594 
7595 static const struct file_operations trace_clock_fops = {
7596 	.open		= tracing_clock_open,
7597 	.read		= seq_read,
7598 	.llseek		= seq_lseek,
7599 	.release	= tracing_single_release_tr,
7600 	.write		= tracing_clock_write,
7601 };
7602 
7603 static const struct file_operations trace_time_stamp_mode_fops = {
7604 	.open		= tracing_time_stamp_mode_open,
7605 	.read		= seq_read,
7606 	.llseek		= seq_lseek,
7607 	.release	= tracing_single_release_tr,
7608 };
7609 
7610 #ifdef CONFIG_TRACER_SNAPSHOT
7611 static const struct file_operations snapshot_fops = {
7612 	.open		= tracing_snapshot_open,
7613 	.read		= seq_read,
7614 	.write		= tracing_snapshot_write,
7615 	.llseek		= tracing_lseek,
7616 	.release	= tracing_snapshot_release,
7617 };
7618 
7619 static const struct file_operations snapshot_raw_fops = {
7620 	.open		= snapshot_raw_open,
7621 	.read		= tracing_buffers_read,
7622 	.release	= tracing_buffers_release,
7623 	.splice_read	= tracing_buffers_splice_read,
7624 	.llseek		= no_llseek,
7625 };
7626 
7627 #endif /* CONFIG_TRACER_SNAPSHOT */
7628 
7629 /*
7630  * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7631  * @filp: The active open file structure
7632  * @ubuf: The user space provided buffer to read the value from
7633  * @cnt: The maximum number of bytes to read from @ubuf
7634  * @ppos: The current "file" position
7635  *
7636  * This function implements the write interface for a struct trace_min_max_param.
7637  * The filp->private_data must point to a trace_min_max_param structure that
7638  * defines where to write the value, the min and the max acceptable values,
7639  * and a lock to protect the write.
7640  */
7641 static ssize_t
7642 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7643 {
7644 	struct trace_min_max_param *param = filp->private_data;
7645 	u64 val;
7646 	int err;
7647 
7648 	if (!param)
7649 		return -EFAULT;
7650 
7651 	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7652 	if (err)
7653 		return err;
7654 
7655 	if (param->lock)
7656 		mutex_lock(param->lock);
7657 
7658 	if (param->min && val < *param->min)
7659 		err = -EINVAL;
7660 
7661 	if (param->max && val > *param->max)
7662 		err = -EINVAL;
7663 
7664 	if (!err)
7665 		*param->val = val;
7666 
7667 	if (param->lock)
7668 		mutex_unlock(param->lock);
7669 
7670 	if (err)
7671 		return err;
7672 
7673 	return cnt;
7674 }
7675 
7676 /*
7677  * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7678  * @filp: The active open file structure
7679  * @ubuf: The userspace provided buffer to read value into
7680  * @cnt: The maximum number of bytes to read
7681  * @ppos: The current "file" position
7682  *
7683  * This function implements the read interface for a struct trace_min_max_param.
7684  * The filp->private_data must point to a trace_min_max_param struct with valid
7685  * data.
7686  */
7687 static ssize_t
7688 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7689 {
7690 	struct trace_min_max_param *param = filp->private_data;
7691 	char buf[U64_STR_SIZE];
7692 	int len;
7693 	u64 val;
7694 
7695 	if (!param)
7696 		return -EFAULT;
7697 
7698 	val = *param->val;
7699 
7700 	if (cnt > sizeof(buf))
7701 		cnt = sizeof(buf);
7702 
7703 	len = snprintf(buf, sizeof(buf), "%llu\n", val);
7704 
7705 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7706 }
7707 
7708 const struct file_operations trace_min_max_fops = {
7709 	.open		= tracing_open_generic,
7710 	.read		= trace_min_max_read,
7711 	.write		= trace_min_max_write,
7712 };
7713 
7714 #define TRACING_LOG_ERRS_MAX	8
7715 #define TRACING_LOG_LOC_MAX	128
7716 
7717 #define CMD_PREFIX "  Command: "
7718 
7719 struct err_info {
7720 	const char	**errs;	/* ptr to loc-specific array of err strings */
7721 	u8		type;	/* index into errs -> specific err string */
7722 	u8		pos;	/* caret position; fits in a u8 as MAX_FILTER_STR_VAL = 256 */
7723 	u64		ts;
7724 };
7725 
7726 struct tracing_log_err {
7727 	struct list_head	list;
7728 	struct err_info		info;
7729 	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
7730 	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7731 };
7732 
7733 static DEFINE_MUTEX(tracing_err_log_lock);
7734 
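/*
 * Get a slot for a new error log entry: allocate fresh entries until
 * TRACING_LOG_ERRS_MAX exist for this trace array, then recycle the
 * oldest entry on the list.  Returns ERR_PTR(-ENOMEM) if the
 * allocation fails.
 */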
7735 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7736 {
7737 	struct tracing_log_err *err;
7738 
7739 	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7740 		err = kzalloc(sizeof(*err), GFP_KERNEL);
7741 		if (!err)
7742 			err = ERR_PTR(-ENOMEM);
7743 		tr->n_err_log_entries++;
7744 
7745 		return err;
7746 	}
7747 
7748 	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7749 	list_del(&err->list);
7750 
7751 	return err;
7752 }
7753 
7754 /**
7755  * err_pos - find the position of a string within a command for error careting
7756  * @cmd: The tracing command that caused the error
7757  * @str: The string to position the caret at within @cmd
7758  *
7759  * Finds the position of the first occurrence of @str within @cmd.  The
7760  * return value can be passed to tracing_log_err() for caret placement
7761  * within @cmd.
7762  *
7763  * Returns the index within @cmd of the first occurrence of @str or 0
7764  * if @str was not found.
7765  */
7766 unsigned int err_pos(char *cmd, const char *str)
7767 {
7768 	char *found;
7769 
7770 	if (WARN_ON(!strlen(cmd)))
7771 		return 0;
7772 
7773 	found = strstr(cmd, str);
7774 	if (found)
7775 		return found - cmd;
7776 
7777 	return 0;
7778 }
7779 
7780 /**
7781  * tracing_log_err - write an error to the tracing error log
7782  * @tr: The associated trace array for the error (NULL for top level array)
7783  * @loc: A string describing where the error occurred
7784  * @cmd: The tracing command that caused the error
7785  * @errs: The array of loc-specific static error strings
7786  * @type: The index into errs[], which produces the specific static err string
7787  * @pos: The position the caret should be placed in the cmd
7788  *
7789  * Writes an error into tracing/error_log of the form:
7790  *
7791  * <loc>: error: <text>
7792  *   Command: <cmd>
7793  *              ^
7794  *
7795  * tracing/error_log is a small log file containing the last
7796  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7797  * unless there has been a tracing error, and the error log can be
7798  * cleared and have its memory freed by writing the empty string in
7799  * truncation mode to it, i.e. echo > tracing/error_log.
7800  *
7801  * NOTE: the @errs array along with the @type param are used to
7802  * produce a static error string - this string is not copied and saved
7803  * when the error is logged - only a pointer to it is saved.  See
7804  * existing callers for examples of how static strings are typically
7805  * defined for use with tracing_log_err().
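 *
 * As an illustration only (the names below are hypothetical, not taken
 * from an actual caller), a command parser could log an error with:
 *
 *	static const char *foo_errs[] = { "Duplicate name", "Bad format" };
 *
 *	tracing_log_err(tr, "foo: parse", cmd, foo_errs,
 *			FOO_ERR_BAD_FORMAT, err_pos(cmd, field));
 *
 * where FOO_ERR_BAD_FORMAT is the index of "Bad format" in foo_errs and
 * @field points at the offending token within @cmd.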
7806  */
7807 void tracing_log_err(struct trace_array *tr,
7808 		     const char *loc, const char *cmd,
7809 		     const char **errs, u8 type, u8 pos)
7810 {
7811 	struct tracing_log_err *err;
7812 
7813 	if (!tr)
7814 		tr = &global_trace;
7815 
7816 	mutex_lock(&tracing_err_log_lock);
7817 	err = get_tracing_log_err(tr);
7818 	if (PTR_ERR(err) == -ENOMEM) {
7819 		mutex_unlock(&tracing_err_log_lock);
7820 		return;
7821 	}
7822 
7823 	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7824 	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7825 
7826 	err->info.errs = errs;
7827 	err->info.type = type;
7828 	err->info.pos = pos;
7829 	err->info.ts = local_clock();
7830 
7831 	list_add_tail(&err->list, &tr->err_log);
7832 	mutex_unlock(&tracing_err_log_lock);
7833 }
7834 
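/* Free every entry in the error log of this trace array and reset the count. */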
7835 static void clear_tracing_err_log(struct trace_array *tr)
7836 {
7837 	struct tracing_log_err *err, *next;
7838 
7839 	mutex_lock(&tracing_err_log_lock);
7840 	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7841 		list_del(&err->list);
7842 		kfree(err);
7843 	}
7844 
7845 	tr->n_err_log_entries = 0;
7846 	mutex_unlock(&tracing_err_log_lock);
7847 }
7848 
7849 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7850 {
7851 	struct trace_array *tr = m->private;
7852 
7853 	mutex_lock(&tracing_err_log_lock);
7854 
7855 	return seq_list_start(&tr->err_log, *pos);
7856 }
7857 
7858 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7859 {
7860 	struct trace_array *tr = m->private;
7861 
7862 	return seq_list_next(v, &tr->err_log, pos);
7863 }
7864 
7865 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7866 {
7867 	mutex_unlock(&tracing_err_log_lock);
7868 }
7869 
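/*
 * Print the caret line under the logged command: indent past the
 * CMD_PREFIX and @pos characters, then emit the '^' marker.
 */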
7870 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7871 {
7872 	u8 i;
7873 
7874 	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7875 		seq_putc(m, ' ');
7876 	for (i = 0; i < pos; i++)
7877 		seq_putc(m, ' ');
7878 	seq_puts(m, "^\n");
7879 }
7880 
7881 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7882 {
7883 	struct tracing_log_err *err = v;
7884 
7885 	if (err) {
7886 		const char *err_text = err->info.errs[err->info.type];
7887 		u64 sec = err->info.ts;
7888 		u32 nsec;
7889 
7890 		nsec = do_div(sec, NSEC_PER_SEC);
7891 		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7892 			   err->loc, err_text);
7893 		seq_printf(m, "%s", err->cmd);
7894 		tracing_err_log_show_pos(m, err->info.pos);
7895 	}
7896 
7897 	return 0;
7898 }
7899 
7900 static const struct seq_operations tracing_err_log_seq_ops = {
7901 	.start  = tracing_err_log_seq_start,
7902 	.next   = tracing_err_log_seq_next,
7903 	.stop   = tracing_err_log_seq_stop,
7904 	.show   = tracing_err_log_seq_show
7905 };
7906 
7907 static int tracing_err_log_open(struct inode *inode, struct file *file)
7908 {
7909 	struct trace_array *tr = inode->i_private;
7910 	int ret = 0;
7911 
7912 	ret = tracing_check_open_get_tr(tr);
7913 	if (ret)
7914 		return ret;
7915 
7916 	/* If this file was opened for write, then erase contents */
7917 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7918 		clear_tracing_err_log(tr);
7919 
7920 	if (file->f_mode & FMODE_READ) {
7921 		ret = seq_open(file, &tracing_err_log_seq_ops);
7922 		if (!ret) {
7923 			struct seq_file *m = file->private_data;
7924 			m->private = tr;
7925 		} else {
7926 			trace_array_put(tr);
7927 		}
7928 	}
7929 	return ret;
7930 }
7931 
7932 static ssize_t tracing_err_log_write(struct file *file,
7933 				     const char __user *buffer,
7934 				     size_t count, loff_t *ppos)
7935 {
7936 	return count;
7937 }
7938 
7939 static int tracing_err_log_release(struct inode *inode, struct file *file)
7940 {
7941 	struct trace_array *tr = inode->i_private;
7942 
7943 	trace_array_put(tr);
7944 
7945 	if (file->f_mode & FMODE_READ)
7946 		seq_release(inode, file);
7947 
7948 	return 0;
7949 }
7950 
7951 static const struct file_operations tracing_err_log_fops = {
7952 	.open           = tracing_err_log_open,
7953 	.write		= tracing_err_log_write,
7954 	.read           = seq_read,
7955 	.llseek         = seq_lseek,
7956 	.release        = tracing_err_log_release,
7957 };
7958 
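/*
 * Open a trace_pipe_raw file: set up an iterator over the raw ring
 * buffer of one CPU (or all CPUs) and take a reference on the trace
 * array so it cannot go away while the file is open.
 */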
7959 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7960 {
7961 	struct trace_array *tr = inode->i_private;
7962 	struct ftrace_buffer_info *info;
7963 	int ret;
7964 
7965 	ret = tracing_check_open_get_tr(tr);
7966 	if (ret)
7967 		return ret;
7968 
7969 	info = kvzalloc(sizeof(*info), GFP_KERNEL);
7970 	if (!info) {
7971 		trace_array_put(tr);
7972 		return -ENOMEM;
7973 	}
7974 
7975 	mutex_lock(&trace_types_lock);
7976 
7977 	info->iter.tr		= tr;
7978 	info->iter.cpu_file	= tracing_get_cpu(inode);
7979 	info->iter.trace	= tr->current_trace;
7980 	info->iter.array_buffer = &tr->array_buffer;
7981 	info->spare		= NULL;
7982 	/* Force reading ring buffer for first read */
7983 	info->read		= (unsigned int)-1;
7984 
7985 	filp->private_data = info;
7986 
7987 	tr->trace_ref++;
7988 
7989 	mutex_unlock(&trace_types_lock);
7990 
7991 	ret = nonseekable_open(inode, filp);
7992 	if (ret < 0)
7993 		trace_array_put(tr);
7994 
7995 	return ret;
7996 }
7997 
7998 static __poll_t
7999 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8000 {
8001 	struct ftrace_buffer_info *info = filp->private_data;
8002 	struct trace_iterator *iter = &info->iter;
8003 
8004 	return trace_poll(iter, filp, poll_table);
8005 }
8006 
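/*
 * Read raw ring-buffer pages. A spare page is allocated on first use
 * and refilled from the ring buffer once all previously read data has
 * been consumed; info->read tracks how far into the spare page the
 * reader has gotten.
 */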
8007 static ssize_t
8008 tracing_buffers_read(struct file *filp, char __user *ubuf,
8009 		     size_t count, loff_t *ppos)
8010 {
8011 	struct ftrace_buffer_info *info = filp->private_data;
8012 	struct trace_iterator *iter = &info->iter;
8013 	ssize_t ret = 0;
8014 	ssize_t size;
8015 
8016 	if (!count)
8017 		return 0;
8018 
8019 #ifdef CONFIG_TRACER_MAX_TRACE
8020 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8021 		return -EBUSY;
8022 #endif
8023 
8024 	if (!info->spare) {
8025 		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8026 							  iter->cpu_file);
8027 		if (IS_ERR(info->spare)) {
8028 			ret = PTR_ERR(info->spare);
8029 			info->spare = NULL;
8030 		} else {
8031 			info->spare_cpu = iter->cpu_file;
8032 		}
8033 	}
8034 	if (!info->spare)
8035 		return ret;
8036 
8037 	/* Do we have previous read data to read? */
8038 	if (info->read < PAGE_SIZE)
8039 		goto read;
8040 
8041  again:
8042 	trace_access_lock(iter->cpu_file);
8043 	ret = ring_buffer_read_page(iter->array_buffer->buffer,
8044 				    &info->spare,
8045 				    count,
8046 				    iter->cpu_file, 0);
8047 	trace_access_unlock(iter->cpu_file);
8048 
8049 	if (ret < 0) {
8050 		if (trace_empty(iter)) {
8051 			if ((filp->f_flags & O_NONBLOCK))
8052 				return -EAGAIN;
8053 
8054 			ret = wait_on_pipe(iter, 0);
8055 			if (ret)
8056 				return ret;
8057 
8058 			goto again;
8059 		}
8060 		return 0;
8061 	}
8062 
8063 	info->read = 0;
8064  read:
8065 	size = PAGE_SIZE - info->read;
8066 	if (size > count)
8067 		size = count;
8068 
8069 	ret = copy_to_user(ubuf, info->spare + info->read, size);
8070 	if (ret == size)
8071 		return -EFAULT;
8072 
8073 	size -= ret;
8074 
8075 	*ppos += size;
8076 	info->read += size;
8077 
8078 	return size;
8079 }
8080 
8081 static int tracing_buffers_release(struct inode *inode, struct file *file)
8082 {
8083 	struct ftrace_buffer_info *info = file->private_data;
8084 	struct trace_iterator *iter = &info->iter;
8085 
8086 	mutex_lock(&trace_types_lock);
8087 
8088 	iter->tr->trace_ref--;
8089 
8090 	__trace_array_put(iter->tr);
8091 
8092 	if (info->spare)
8093 		ring_buffer_free_read_page(iter->array_buffer->buffer,
8094 					   info->spare_cpu, info->spare);
8095 	kvfree(info);
8096 
8097 	mutex_unlock(&trace_types_lock);
8098 
8099 	return 0;
8100 }
8101 
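/*
 * A reference counted ring-buffer page handed out to a pipe by
 * tracing_buffers_splice_read(). The page is returned to the ring
 * buffer only when the last reference is dropped.
 */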
8102 struct buffer_ref {
8103 	struct trace_buffer	*buffer;
8104 	void			*page;
8105 	int			cpu;
8106 	refcount_t		refcount;
8107 };
8108 
8109 static void buffer_ref_release(struct buffer_ref *ref)
8110 {
8111 	if (!refcount_dec_and_test(&ref->refcount))
8112 		return;
8113 	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8114 	kfree(ref);
8115 }
8116 
8117 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8118 				    struct pipe_buffer *buf)
8119 {
8120 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8121 
8122 	buffer_ref_release(ref);
8123 	buf->private = 0;
8124 }
8125 
8126 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8127 				struct pipe_buffer *buf)
8128 {
8129 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8130 
8131 	if (refcount_read(&ref->refcount) > INT_MAX/2)
8132 		return false;
8133 
8134 	refcount_inc(&ref->refcount);
8135 	return true;
8136 }
8137 
8138 /* Pipe buffer operations for a buffer. */
8139 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8140 	.release		= buffer_pipe_buf_release,
8141 	.get			= buffer_pipe_buf_get,
8142 };
8143 
8144 /*
8145  * Callback from splice_to_pipe(); releases the pages left in the spd
8146  * in case we errored out while filling the pipe.
8147  */
8148 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8149 {
8150 	struct buffer_ref *ref =
8151 		(struct buffer_ref *)spd->partial[i].private;
8152 
8153 	buffer_ref_release(ref);
8154 	spd->partial[i].private = 0;
8155 }
8156 
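/*
 * Splice whole ring-buffer pages into a pipe without copying them:
 * each page is wrapped in a buffer_ref and handed back to the ring
 * buffer by buffer_pipe_buf_release() when the pipe is done with it.
 */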
8157 static ssize_t
8158 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8159 			    struct pipe_inode_info *pipe, size_t len,
8160 			    unsigned int flags)
8161 {
8162 	struct ftrace_buffer_info *info = file->private_data;
8163 	struct trace_iterator *iter = &info->iter;
8164 	struct partial_page partial_def[PIPE_DEF_BUFFERS];
8165 	struct page *pages_def[PIPE_DEF_BUFFERS];
8166 	struct splice_pipe_desc spd = {
8167 		.pages		= pages_def,
8168 		.partial	= partial_def,
8169 		.nr_pages_max	= PIPE_DEF_BUFFERS,
8170 		.ops		= &buffer_pipe_buf_ops,
8171 		.spd_release	= buffer_spd_release,
8172 	};
8173 	struct buffer_ref *ref;
8174 	int entries, i;
8175 	ssize_t ret = 0;
8176 
8177 #ifdef CONFIG_TRACER_MAX_TRACE
8178 	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8179 		return -EBUSY;
8180 #endif
8181 
8182 	if (*ppos & (PAGE_SIZE - 1))
8183 		return -EINVAL;
8184 
8185 	if (len & (PAGE_SIZE - 1)) {
8186 		if (len < PAGE_SIZE)
8187 			return -EINVAL;
8188 		len &= PAGE_MASK;
8189 	}
8190 
8191 	if (splice_grow_spd(pipe, &spd))
8192 		return -ENOMEM;
8193 
8194  again:
8195 	trace_access_lock(iter->cpu_file);
8196 	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8197 
8198 	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8199 		struct page *page;
8200 		int r;
8201 
8202 		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8203 		if (!ref) {
8204 			ret = -ENOMEM;
8205 			break;
8206 		}
8207 
8208 		refcount_set(&ref->refcount, 1);
8209 		ref->buffer = iter->array_buffer->buffer;
8210 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8211 		if (IS_ERR(ref->page)) {
8212 			ret = PTR_ERR(ref->page);
8213 			ref->page = NULL;
8214 			kfree(ref);
8215 			break;
8216 		}
8217 		ref->cpu = iter->cpu_file;
8218 
8219 		r = ring_buffer_read_page(ref->buffer, &ref->page,
8220 					  len, iter->cpu_file, 1);
8221 		if (r < 0) {
8222 			ring_buffer_free_read_page(ref->buffer, ref->cpu,
8223 						   ref->page);
8224 			kfree(ref);
8225 			break;
8226 		}
8227 
8228 		page = virt_to_page(ref->page);
8229 
8230 		spd.pages[i] = page;
8231 		spd.partial[i].len = PAGE_SIZE;
8232 		spd.partial[i].offset = 0;
8233 		spd.partial[i].private = (unsigned long)ref;
8234 		spd.nr_pages++;
8235 		*ppos += PAGE_SIZE;
8236 
8237 		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8238 	}
8239 
8240 	trace_access_unlock(iter->cpu_file);
8241 	spd.nr_pages = i;
8242 
8243 	/* did we read anything? */
8244 	if (!spd.nr_pages) {
8245 		if (ret)
8246 			goto out;
8247 
8248 		ret = -EAGAIN;
8249 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8250 			goto out;
8251 
8252 		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8253 		if (ret)
8254 			goto out;
8255 
8256 		goto again;
8257 	}
8258 
8259 	ret = splice_to_pipe(pipe, &spd);
8260 out:
8261 	splice_shrink_spd(&spd);
8262 
8263 	return ret;
8264 }
8265 
8266 static const struct file_operations tracing_buffers_fops = {
8267 	.open		= tracing_buffers_open,
8268 	.read		= tracing_buffers_read,
8269 	.poll		= tracing_buffers_poll,
8270 	.release	= tracing_buffers_release,
8271 	.splice_read	= tracing_buffers_splice_read,
8272 	.llseek		= no_llseek,
8273 };
8274 
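/*
 * Report per-CPU ring buffer statistics (entries, overruns, bytes,
 * timestamps, dropped and read events) for the per_cpu/cpuN/stats file.
 */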
8275 static ssize_t
8276 tracing_stats_read(struct file *filp, char __user *ubuf,
8277 		   size_t count, loff_t *ppos)
8278 {
8279 	struct inode *inode = file_inode(filp);
8280 	struct trace_array *tr = inode->i_private;
8281 	struct array_buffer *trace_buf = &tr->array_buffer;
8282 	int cpu = tracing_get_cpu(inode);
8283 	struct trace_seq *s;
8284 	unsigned long cnt;
8285 	unsigned long long t;
8286 	unsigned long usec_rem;
8287 
8288 	s = kmalloc(sizeof(*s), GFP_KERNEL);
8289 	if (!s)
8290 		return -ENOMEM;
8291 
8292 	trace_seq_init(s);
8293 
8294 	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8295 	trace_seq_printf(s, "entries: %ld\n", cnt);
8296 
8297 	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8298 	trace_seq_printf(s, "overrun: %ld\n", cnt);
8299 
8300 	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8301 	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8302 
8303 	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8304 	trace_seq_printf(s, "bytes: %ld\n", cnt);
8305 
8306 	if (trace_clocks[tr->clock_id].in_ns) {
8307 		/* local or global for trace_clock */
8308 		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8309 		usec_rem = do_div(t, USEC_PER_SEC);
8310 		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8311 								t, usec_rem);
8312 
8313 		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8314 		usec_rem = do_div(t, USEC_PER_SEC);
8315 		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8316 	} else {
8317 		/* counter or tsc mode for trace_clock */
8318 		trace_seq_printf(s, "oldest event ts: %llu\n",
8319 				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8320 
8321 		trace_seq_printf(s, "now ts: %llu\n",
8322 				ring_buffer_time_stamp(trace_buf->buffer));
8323 	}
8324 
8325 	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8326 	trace_seq_printf(s, "dropped events: %ld\n", cnt);
8327 
8328 	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8329 	trace_seq_printf(s, "read events: %ld\n", cnt);
8330 
8331 	count = simple_read_from_buffer(ubuf, count, ppos,
8332 					s->buffer, trace_seq_used(s));
8333 
8334 	kfree(s);
8335 
8336 	return count;
8337 }
8338 
8339 static const struct file_operations tracing_stats_fops = {
8340 	.open		= tracing_open_generic_tr,
8341 	.read		= tracing_stats_read,
8342 	.llseek		= generic_file_llseek,
8343 	.release	= tracing_release_generic_tr,
8344 };
8345 
8346 #ifdef CONFIG_DYNAMIC_FTRACE
8347 
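/*
 * Report how many functions dynamic ftrace has accounted for and how
 * many pages/groups hold their dyn_ftrace records (backs the
 * dyn_ftrace_total_info file).
 */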
8348 static ssize_t
8349 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8350 		  size_t cnt, loff_t *ppos)
8351 {
8352 	ssize_t ret;
8353 	char *buf;
8354 	int r;
8355 
8356 	/* 256 should be plenty to hold the amount needed */
8357 	buf = kmalloc(256, GFP_KERNEL);
8358 	if (!buf)
8359 		return -ENOMEM;
8360 
8361 	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8362 		      ftrace_update_tot_cnt,
8363 		      ftrace_number_of_pages,
8364 		      ftrace_number_of_groups);
8365 
8366 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8367 	kfree(buf);
8368 	return ret;
8369 }
8370 
8371 static const struct file_operations tracing_dyn_info_fops = {
8372 	.open		= tracing_open_generic,
8373 	.read		= tracing_read_dyn_info,
8374 	.llseek		= generic_file_llseek,
8375 };
8376 #endif /* CONFIG_DYNAMIC_FTRACE */
8377 
8378 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
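/*
 * Probe callbacks for the "snapshot" command of set_ftrace_filter:
 * ftrace_snapshot() takes a snapshot each time a matching function is
 * hit; ftrace_count_snapshot() does the same but, when a count was
 * given, stops once that many snapshots have been taken.
 */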
8379 static void
8380 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8381 		struct trace_array *tr, struct ftrace_probe_ops *ops,
8382 		void *data)
8383 {
8384 	tracing_snapshot_instance(tr);
8385 }
8386 
8387 static void
8388 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8389 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
8390 		      void *data)
8391 {
8392 	struct ftrace_func_mapper *mapper = data;
8393 	long *count = NULL;
8394 
8395 	if (mapper)
8396 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8397 
8398 	if (count) {
8399 
8400 		if (*count <= 0)
8401 			return;
8402 
8403 		(*count)--;
8404 	}
8405 
8406 	tracing_snapshot_instance(tr);
8407 }
8408 
8409 static int
8410 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8411 		      struct ftrace_probe_ops *ops, void *data)
8412 {
8413 	struct ftrace_func_mapper *mapper = data;
8414 	long *count = NULL;
8415 
8416 	seq_printf(m, "%ps:", (void *)ip);
8417 
8418 	seq_puts(m, "snapshot");
8419 
8420 	if (mapper)
8421 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8422 
8423 	if (count)
8424 		seq_printf(m, ":count=%ld\n", *count);
8425 	else
8426 		seq_puts(m, ":unlimited\n");
8427 
8428 	return 0;
8429 }
8430 
8431 static int
8432 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8433 		     unsigned long ip, void *init_data, void **data)
8434 {
8435 	struct ftrace_func_mapper *mapper = *data;
8436 
8437 	if (!mapper) {
8438 		mapper = allocate_ftrace_func_mapper();
8439 		if (!mapper)
8440 			return -ENOMEM;
8441 		*data = mapper;
8442 	}
8443 
8444 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8445 }
8446 
8447 static void
8448 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8449 		     unsigned long ip, void *data)
8450 {
8451 	struct ftrace_func_mapper *mapper = data;
8452 
8453 	if (!ip) {
8454 		if (!mapper)
8455 			return;
8456 		free_ftrace_func_mapper(mapper, NULL);
8457 		return;
8458 	}
8459 
8460 	ftrace_func_mapper_remove_ip(mapper, ip);
8461 }
8462 
8463 static struct ftrace_probe_ops snapshot_probe_ops = {
8464 	.func			= ftrace_snapshot,
8465 	.print			= ftrace_snapshot_print,
8466 };
8467 
8468 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8469 	.func			= ftrace_count_snapshot,
8470 	.print			= ftrace_snapshot_print,
8471 	.init			= ftrace_snapshot_init,
8472 	.free			= ftrace_snapshot_free,
8473 };
8474 
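/*
 * Handle "<func>:snapshot[:count]" written to set_ftrace_filter:
 * allocate the snapshot buffer if needed and register the probe for
 * the matching functions, or unregister it when the glob is prefixed
 * with '!'.
 */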
8475 static int
8476 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8477 			       char *glob, char *cmd, char *param, int enable)
8478 {
8479 	struct ftrace_probe_ops *ops;
8480 	void *count = (void *)-1;
8481 	char *number;
8482 	int ret;
8483 
8484 	if (!tr)
8485 		return -ENODEV;
8486 
8487 	/* hash funcs only work with set_ftrace_filter */
8488 	if (!enable)
8489 		return -EINVAL;
8490 
8491 	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8492 
8493 	if (glob[0] == '!')
8494 		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8495 
8496 	if (!param)
8497 		goto out_reg;
8498 
8499 	number = strsep(&param, ":");
8500 
8501 	if (!strlen(number))
8502 		goto out_reg;
8503 
8504 	/*
8505 	 * We use the callback data field (which is a pointer)
8506 	 * as our counter.
8507 	 */
8508 	ret = kstrtoul(number, 0, (unsigned long *)&count);
8509 	if (ret)
8510 		return ret;
8511 
8512  out_reg:
8513 	ret = tracing_alloc_snapshot_instance(tr);
8514 	if (ret < 0)
8515 		goto out;
8516 
8517 	ret = register_ftrace_function_probe(glob, tr, ops, count);
8518 
8519  out:
8520 	return ret < 0 ? ret : 0;
8521 }
8522 
8523 static struct ftrace_func_command ftrace_snapshot_cmd = {
8524 	.name			= "snapshot",
8525 	.func			= ftrace_trace_snapshot_callback,
8526 };
8527 
8528 static __init int register_snapshot_cmd(void)
8529 {
8530 	return register_ftrace_command(&ftrace_snapshot_cmd);
8531 }
8532 #else
8533 static inline __init int register_snapshot_cmd(void) { return 0; }
8534 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8535 
8536 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8537 {
8538 	if (WARN_ON(!tr->dir))
8539 		return ERR_PTR(-ENODEV);
8540 
8541 	/* Top directory uses NULL as the parent */
8542 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8543 		return NULL;
8544 
8545 	/* All sub buffers have a descriptor */
8546 	return tr->dir;
8547 }
8548 
8549 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8550 {
8551 	struct dentry *d_tracer;
8552 
8553 	if (tr->percpu_dir)
8554 		return tr->percpu_dir;
8555 
8556 	d_tracer = tracing_get_dentry(tr);
8557 	if (IS_ERR(d_tracer))
8558 		return NULL;
8559 
8560 	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8561 
8562 	MEM_FAIL(!tr->percpu_dir,
8563 		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8564 
8565 	return tr->percpu_dir;
8566 }
8567 
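/*
 * Like trace_create_file(), but also stashes cpu + 1 in the inode's
 * i_cdev so that tracing_get_cpu() can recover which CPU the file
 * belongs to.
 */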
8568 static struct dentry *
8569 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8570 		      void *data, long cpu, const struct file_operations *fops)
8571 {
8572 	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8573 
8574 	if (ret) /* See tracing_get_cpu() */
8575 		d_inode(ret)->i_cdev = (void *)(cpu + 1);
8576 	return ret;
8577 }
8578 
8579 static void
8580 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8581 {
8582 	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8583 	struct dentry *d_cpu;
8584 	char cpu_dir[30]; /* 30 characters should be more than enough */
8585 
8586 	if (!d_percpu)
8587 		return;
8588 
8589 	snprintf(cpu_dir, 30, "cpu%ld", cpu);
8590 	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8591 	if (!d_cpu) {
8592 		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8593 		return;
8594 	}
8595 
8596 	/* per cpu trace_pipe */
8597 	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8598 				tr, cpu, &tracing_pipe_fops);
8599 
8600 	/* per cpu trace */
8601 	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8602 				tr, cpu, &tracing_fops);
8603 
8604 	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8605 				tr, cpu, &tracing_buffers_fops);
8606 
8607 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8608 				tr, cpu, &tracing_stats_fops);
8609 
8610 	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8611 				tr, cpu, &tracing_entries_fops);
8612 
8613 #ifdef CONFIG_TRACER_SNAPSHOT
8614 	trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8615 				tr, cpu, &snapshot_fops);
8616 
8617 	trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8618 				tr, cpu, &snapshot_raw_fops);
8619 #endif
8620 }
8621 
8622 #ifdef CONFIG_FTRACE_SELFTEST
8623 /* Let selftest have access to static functions in this file */
8624 #include "trace_selftest.c"
8625 #endif
8626 
8627 static ssize_t
8628 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8629 			loff_t *ppos)
8630 {
8631 	struct trace_option_dentry *topt = filp->private_data;
8632 	char *buf;
8633 
8634 	if (topt->flags->val & topt->opt->bit)
8635 		buf = "1\n";
8636 	else
8637 		buf = "0\n";
8638 
8639 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8640 }
8641 
8642 static ssize_t
8643 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8644 			 loff_t *ppos)
8645 {
8646 	struct trace_option_dentry *topt = filp->private_data;
8647 	unsigned long val;
8648 	int ret;
8649 
8650 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8651 	if (ret)
8652 		return ret;
8653 
8654 	if (val != 0 && val != 1)
8655 		return -EINVAL;
8656 
8657 	if (!!(topt->flags->val & topt->opt->bit) != val) {
8658 		mutex_lock(&trace_types_lock);
8659 		ret = __set_tracer_option(topt->tr, topt->flags,
8660 					  topt->opt, !val);
8661 		mutex_unlock(&trace_types_lock);
8662 		if (ret)
8663 			return ret;
8664 	}
8665 
8666 	*ppos += cnt;
8667 
8668 	return cnt;
8669 }
8670 
8671 
8672 static const struct file_operations trace_options_fops = {
8673 	.open = tracing_open_generic,
8674 	.read = trace_options_read,
8675 	.write = trace_options_write,
8676 	.llseek	= generic_file_llseek,
8677 };
8678 
8679 /*
8680  * In order to pass in both the trace_array descriptor as well as the index
8681  * to the flag that the trace option file represents, the trace_array
8682  * has a character array of trace_flags_index[], which holds the index
8683  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8684  * The address of this character array is passed to the flag option file
8685  * read/write callbacks.
8686  *
8687  * In order to extract both the index and the trace_array descriptor,
8688  * get_tr_index() uses the following algorithm.
8689  *
8690  *   idx = *ptr;
8691  *
8692  * As the pointer holds the address of index[idx], and index[idx] == idx,
8693  * dereferencing it yields the flag's bit index.
8694  *
8695  * Then, to get the trace_array descriptor, subtract that index from
8696  * the pointer to get back to the start of the index array itself:
8697  *
8698  *   ptr - idx == &index[0]
8699  *
8700  * Then a simple container_of() from that pointer gets us to the
8701  * trace_array descriptor.
8702  */
8703 static void get_tr_index(void *data, struct trace_array **ptr,
8704 			 unsigned int *pindex)
8705 {
8706 	*pindex = *(unsigned char *)data;
8707 
8708 	*ptr = container_of(data - *pindex, struct trace_array,
8709 			    trace_flags_index);
8710 }
8711 
8712 static ssize_t
8713 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8714 			loff_t *ppos)
8715 {
8716 	void *tr_index = filp->private_data;
8717 	struct trace_array *tr;
8718 	unsigned int index;
8719 	char *buf;
8720 
8721 	get_tr_index(tr_index, &tr, &index);
8722 
8723 	if (tr->trace_flags & (1 << index))
8724 		buf = "1\n";
8725 	else
8726 		buf = "0\n";
8727 
8728 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8729 }
8730 
8731 static ssize_t
8732 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8733 			 loff_t *ppos)
8734 {
8735 	void *tr_index = filp->private_data;
8736 	struct trace_array *tr;
8737 	unsigned int index;
8738 	unsigned long val;
8739 	int ret;
8740 
8741 	get_tr_index(tr_index, &tr, &index);
8742 
8743 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8744 	if (ret)
8745 		return ret;
8746 
8747 	if (val != 0 && val != 1)
8748 		return -EINVAL;
8749 
8750 	mutex_lock(&event_mutex);
8751 	mutex_lock(&trace_types_lock);
8752 	ret = set_tracer_flag(tr, 1 << index, val);
8753 	mutex_unlock(&trace_types_lock);
8754 	mutex_unlock(&event_mutex);
8755 
8756 	if (ret < 0)
8757 		return ret;
8758 
8759 	*ppos += cnt;
8760 
8761 	return cnt;
8762 }
8763 
8764 static const struct file_operations trace_options_core_fops = {
8765 	.open = tracing_open_generic,
8766 	.read = trace_options_core_read,
8767 	.write = trace_options_core_write,
8768 	.llseek = generic_file_llseek,
8769 };
8770 
8771 struct dentry *trace_create_file(const char *name,
8772 				 umode_t mode,
8773 				 struct dentry *parent,
8774 				 void *data,
8775 				 const struct file_operations *fops)
8776 {
8777 	struct dentry *ret;
8778 
8779 	ret = tracefs_create_file(name, mode, parent, data, fops);
8780 	if (!ret)
8781 		pr_warn("Could not create tracefs '%s' entry\n", name);
8782 
8783 	return ret;
8784 }
8785 
8786 
8787 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8788 {
8789 	struct dentry *d_tracer;
8790 
8791 	if (tr->options)
8792 		return tr->options;
8793 
8794 	d_tracer = tracing_get_dentry(tr);
8795 	if (IS_ERR(d_tracer))
8796 		return NULL;
8797 
8798 	tr->options = tracefs_create_dir("options", d_tracer);
8799 	if (!tr->options) {
8800 		pr_warn("Could not create tracefs directory 'options'\n");
8801 		return NULL;
8802 	}
8803 
8804 	return tr->options;
8805 }
8806 
8807 static void
8808 create_trace_option_file(struct trace_array *tr,
8809 			 struct trace_option_dentry *topt,
8810 			 struct tracer_flags *flags,
8811 			 struct tracer_opt *opt)
8812 {
8813 	struct dentry *t_options;
8814 
8815 	t_options = trace_options_init_dentry(tr);
8816 	if (!t_options)
8817 		return;
8818 
8819 	topt->flags = flags;
8820 	topt->opt = opt;
8821 	topt->tr = tr;
8822 
8823 	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8824 					t_options, topt, &trace_options_fops);
8825 
8826 }
8827 
8828 static void
8829 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8830 {
8831 	struct trace_option_dentry *topts;
8832 	struct trace_options *tr_topts;
8833 	struct tracer_flags *flags;
8834 	struct tracer_opt *opts;
8835 	int cnt;
8836 	int i;
8837 
8838 	if (!tracer)
8839 		return;
8840 
8841 	flags = tracer->flags;
8842 
8843 	if (!flags || !flags->opts)
8844 		return;
8845 
8846 	/*
8847 	 * If this is an instance, only create flags for tracers
8848 	 * the instance may have.
8849 	 */
8850 	if (!trace_ok_for_array(tracer, tr))
8851 		return;
8852 
8853 	for (i = 0; i < tr->nr_topts; i++) {
8854 		/* Make sure there's no duplicate flags. */
8855 		/* Make sure there are no duplicate flags. */
8856 			return;
8857 	}
8858 
8859 	opts = flags->opts;
8860 
8861 	for (cnt = 0; opts[cnt].name; cnt++)
8862 		;
8863 
8864 	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8865 	if (!topts)
8866 		return;
8867 
8868 	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8869 			    GFP_KERNEL);
8870 	if (!tr_topts) {
8871 		kfree(topts);
8872 		return;
8873 	}
8874 
8875 	tr->topts = tr_topts;
8876 	tr->topts[tr->nr_topts].tracer = tracer;
8877 	tr->topts[tr->nr_topts].topts = topts;
8878 	tr->nr_topts++;
8879 
8880 	for (cnt = 0; opts[cnt].name; cnt++) {
8881 		create_trace_option_file(tr, &topts[cnt], flags,
8882 					 &opts[cnt]);
8883 		MEM_FAIL(topts[cnt].entry == NULL,
8884 			  "Failed to create trace option: %s",
8885 			  opts[cnt].name);
8886 	}
8887 }
8888 
8889 static struct dentry *
8890 create_trace_option_core_file(struct trace_array *tr,
8891 			      const char *option, long index)
8892 {
8893 	struct dentry *t_options;
8894 
8895 	t_options = trace_options_init_dentry(tr);
8896 	if (!t_options)
8897 		return NULL;
8898 
8899 	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
8900 				 (void *)&tr->trace_flags_index[index],
8901 				 &trace_options_core_fops);
8902 }
8903 
8904 static void create_trace_options_dir(struct trace_array *tr)
8905 {
8906 	struct dentry *t_options;
8907 	bool top_level = tr == &global_trace;
8908 	int i;
8909 
8910 	t_options = trace_options_init_dentry(tr);
8911 	if (!t_options)
8912 		return;
8913 
8914 	for (i = 0; trace_options[i]; i++) {
8915 		if (top_level ||
8916 		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8917 			create_trace_option_core_file(tr, trace_options[i], i);
8918 	}
8919 }
8920 
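/* "tracing_on" file: writing 1/0 turns recording into the ring buffer on/off. */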
8921 static ssize_t
8922 rb_simple_read(struct file *filp, char __user *ubuf,
8923 	       size_t cnt, loff_t *ppos)
8924 {
8925 	struct trace_array *tr = filp->private_data;
8926 	char buf[64];
8927 	int r;
8928 
8929 	r = tracer_tracing_is_on(tr);
8930 	r = sprintf(buf, "%d\n", r);
8931 
8932 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8933 }
8934 
8935 static ssize_t
8936 rb_simple_write(struct file *filp, const char __user *ubuf,
8937 		size_t cnt, loff_t *ppos)
8938 {
8939 	struct trace_array *tr = filp->private_data;
8940 	struct trace_buffer *buffer = tr->array_buffer.buffer;
8941 	unsigned long val;
8942 	int ret;
8943 
8944 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8945 	if (ret)
8946 		return ret;
8947 
8948 	if (buffer) {
8949 		mutex_lock(&trace_types_lock);
8950 		if (!!val == tracer_tracing_is_on(tr)) {
8951 			val = 0; /* do nothing */
8952 		} else if (val) {
8953 			tracer_tracing_on(tr);
8954 			if (tr->current_trace->start)
8955 				tr->current_trace->start(tr);
8956 		} else {
8957 			tracer_tracing_off(tr);
8958 			if (tr->current_trace->stop)
8959 				tr->current_trace->stop(tr);
8960 		}
8961 		mutex_unlock(&trace_types_lock);
8962 	}
8963 
8964 	(*ppos)++;
8965 
8966 	return cnt;
8967 }
8968 
8969 static const struct file_operations rb_simple_fops = {
8970 	.open		= tracing_open_generic_tr,
8971 	.read		= rb_simple_read,
8972 	.write		= rb_simple_write,
8973 	.release	= tracing_release_generic_tr,
8974 	.llseek		= default_llseek,
8975 };
8976 
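/*
 * "buffer_percent" file: how full (in percent) the ring buffer must be
 * before a blocked splice reader is woken up.  Writing 0 is treated
 * as 1.
 */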
8977 static ssize_t
8978 buffer_percent_read(struct file *filp, char __user *ubuf,
8979 		    size_t cnt, loff_t *ppos)
8980 {
8981 	struct trace_array *tr = filp->private_data;
8982 	char buf[64];
8983 	int r;
8984 
8985 	r = tr->buffer_percent;
8986 	r = sprintf(buf, "%d\n", r);
8987 
8988 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8989 }
8990 
8991 static ssize_t
8992 buffer_percent_write(struct file *filp, const char __user *ubuf,
8993 		     size_t cnt, loff_t *ppos)
8994 {
8995 	struct trace_array *tr = filp->private_data;
8996 	unsigned long val;
8997 	int ret;
8998 
8999 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9000 	if (ret)
9001 		return ret;
9002 
9003 	if (val > 100)
9004 		return -EINVAL;
9005 
9006 	if (!val)
9007 		val = 1;
9008 
9009 	tr->buffer_percent = val;
9010 
9011 	(*ppos)++;
9012 
9013 	return cnt;
9014 }
9015 
9016 static const struct file_operations buffer_percent_fops = {
9017 	.open		= tracing_open_generic_tr,
9018 	.read		= buffer_percent_read,
9019 	.write		= buffer_percent_write,
9020 	.release	= tracing_release_generic_tr,
9021 	.llseek		= default_llseek,
9022 };
9023 
9024 static struct dentry *trace_instance_dir;
9025 
9026 static void
9027 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9028 
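/*
 * Allocate the ring buffer and per-CPU data for one array_buffer of a
 * trace array, honoring the overwrite setting from the trace flags.
 */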
9029 static int
9030 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9031 {
9032 	enum ring_buffer_flags rb_flags;
9033 
9034 	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9035 
9036 	buf->tr = tr;
9037 
9038 	buf->buffer = ring_buffer_alloc(size, rb_flags);
9039 	if (!buf->buffer)
9040 		return -ENOMEM;
9041 
9042 	buf->data = alloc_percpu(struct trace_array_cpu);
9043 	if (!buf->data) {
9044 		ring_buffer_free(buf->buffer);
9045 		buf->buffer = NULL;
9046 		return -ENOMEM;
9047 	}
9048 
9049 	/* Allocate the first page for all buffers */
9050 	set_buffer_entries(&tr->array_buffer,
9051 			   ring_buffer_size(tr->array_buffer.buffer, 0));
9052 
9053 	return 0;
9054 }
9055 
9056 static int allocate_trace_buffers(struct trace_array *tr, int size)
9057 {
9058 	int ret;
9059 
9060 	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9061 	if (ret)
9062 		return ret;
9063 
9064 #ifdef CONFIG_TRACER_MAX_TRACE
9065 	ret = allocate_trace_buffer(tr, &tr->max_buffer,
9066 				    allocate_snapshot ? size : 1);
9067 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9068 		ring_buffer_free(tr->array_buffer.buffer);
9069 		tr->array_buffer.buffer = NULL;
9070 		free_percpu(tr->array_buffer.data);
9071 		tr->array_buffer.data = NULL;
9072 		return -ENOMEM;
9073 	}
9074 	tr->allocated_snapshot = allocate_snapshot;
9075 
9076 	/*
9077 	 * Only the top level trace array gets its snapshot allocated
9078 	 * from the kernel command line.
9079 	 */
9080 	allocate_snapshot = false;
9081 #endif
9082 
9083 	return 0;
9084 }
9085 
9086 static void free_trace_buffer(struct array_buffer *buf)
9087 {
9088 	if (buf->buffer) {
9089 		ring_buffer_free(buf->buffer);
9090 		buf->buffer = NULL;
9091 		free_percpu(buf->data);
9092 		buf->data = NULL;
9093 	}
9094 }
9095 
9096 static void free_trace_buffers(struct trace_array *tr)
9097 {
9098 	if (!tr)
9099 		return;
9100 
9101 	free_trace_buffer(&tr->array_buffer);
9102 
9103 #ifdef CONFIG_TRACER_MAX_TRACE
9104 	free_trace_buffer(&tr->max_buffer);
9105 #endif
9106 }
9107 
9108 static void init_trace_flags_index(struct trace_array *tr)
9109 {
9110 	int i;
9111 
9112 	/* Used by the trace options files */
9113 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9114 		tr->trace_flags_index[i] = i;
9115 }
9116 
9117 static void __update_tracer_options(struct trace_array *tr)
9118 {
9119 	struct tracer *t;
9120 
9121 	for (t = trace_types; t; t = t->next)
9122 		add_tracer_options(tr, t);
9123 }
9124 
9125 static void update_tracer_options(struct trace_array *tr)
9126 {
9127 	mutex_lock(&trace_types_lock);
9128 	__update_tracer_options(tr);
9129 	mutex_unlock(&trace_types_lock);
9130 }
9131 
9132 /* Must have trace_types_lock held */
9133 struct trace_array *trace_array_find(const char *instance)
9134 {
9135 	struct trace_array *tr, *found = NULL;
9136 
9137 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9138 		if (tr->name && strcmp(tr->name, instance) == 0) {
9139 			found = tr;
9140 			break;
9141 		}
9142 	}
9143 
9144 	return found;
9145 }
9146 
9147 struct trace_array *trace_array_find_get(const char *instance)
9148 {
9149 	struct trace_array *tr;
9150 
9151 	mutex_lock(&trace_types_lock);
9152 	tr = trace_array_find(instance);
9153 	if (tr)
9154 		tr->ref++;
9155 	mutex_unlock(&trace_types_lock);
9156 
9157 	return tr;
9158 }
9159 
9160 static int trace_array_create_dir(struct trace_array *tr)
9161 {
9162 	int ret;
9163 
9164 	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9165 	if (!tr->dir)
9166 		return -EINVAL;
9167 
9168 	ret = event_trace_add_tracer(tr->dir, tr);
9169 	if (ret) {
9170 		tracefs_remove(tr->dir);
9171 		return ret;
9172 	}
9173 
9174 	init_tracer_tracefs(tr, tr->dir);
9175 	__update_tracer_options(tr);
9176 
9177 	return ret;
9178 }
9179 
9180 static struct trace_array *trace_array_create(const char *name)
9181 {
9182 	struct trace_array *tr;
9183 	int ret;
9184 
9185 	ret = -ENOMEM;
9186 	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9187 	if (!tr)
9188 		return ERR_PTR(ret);
9189 
9190 	tr->name = kstrdup(name, GFP_KERNEL);
9191 	if (!tr->name)
9192 		goto out_free_tr;
9193 
9194 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9195 		goto out_free_tr;
9196 
9197 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9198 
9199 	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9200 
9201 	raw_spin_lock_init(&tr->start_lock);
9202 
9203 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9204 
9205 	tr->current_trace = &nop_trace;
9206 
9207 	INIT_LIST_HEAD(&tr->systems);
9208 	INIT_LIST_HEAD(&tr->events);
9209 	INIT_LIST_HEAD(&tr->hist_vars);
9210 	INIT_LIST_HEAD(&tr->err_log);
9211 
9212 	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9213 		goto out_free_tr;
9214 
9215 	if (ftrace_allocate_ftrace_ops(tr) < 0)
9216 		goto out_free_tr;
9217 
9218 	ftrace_init_trace_array(tr);
9219 
9220 	init_trace_flags_index(tr);
9221 
9222 	if (trace_instance_dir) {
9223 		ret = trace_array_create_dir(tr);
9224 		if (ret)
9225 			goto out_free_tr;
9226 	} else
9227 		__trace_early_add_events(tr);
9228 
9229 	list_add(&tr->list, &ftrace_trace_arrays);
9230 
9231 	tr->ref++;
9232 
9233 	return tr;
9234 
9235  out_free_tr:
9236 	ftrace_free_ftrace_ops(tr);
9237 	free_trace_buffers(tr);
9238 	free_cpumask_var(tr->tracing_cpumask);
9239 	kfree(tr->name);
9240 	kfree(tr);
9241 
9242 	return ERR_PTR(ret);
9243 }
9244 
9245 static int instance_mkdir(const char *name)
9246 {
9247 	struct trace_array *tr;
9248 	int ret;
9249 
9250 	mutex_lock(&event_mutex);
9251 	mutex_lock(&trace_types_lock);
9252 
9253 	ret = -EEXIST;
9254 	if (trace_array_find(name))
9255 		goto out_unlock;
9256 
9257 	tr = trace_array_create(name);
9258 
9259 	ret = PTR_ERR_OR_ZERO(tr);
9260 
9261 out_unlock:
9262 	mutex_unlock(&trace_types_lock);
9263 	mutex_unlock(&event_mutex);
9264 	return ret;
9265 }
9266 
9267 /**
9268  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9269  * @name: The name of the trace array to be looked up/created.
9270  *
9271  * Returns a pointer to the trace array with the given name, or NULL
9272  * if it cannot be created.
9273  *
9274  * NOTE: This function increments the reference counter associated with the
9275  * trace array returned. This makes sure it cannot be freed while in use.
9276  * Use trace_array_put() once the trace array is no longer needed.
9277  * If the trace_array is to be freed, trace_array_destroy() needs to
9278  * be called after the trace_array_put(), or simply let user space delete
9279  * it from the tracefs instances directory. But until the
9280  * trace_array_put() is called, user space cannot delete it.
9281  *
9282  */
9283 struct trace_array *trace_array_get_by_name(const char *name)
9284 {
9285 	struct trace_array *tr;
9286 
9287 	mutex_lock(&event_mutex);
9288 	mutex_lock(&trace_types_lock);
9289 
9290 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9291 		if (tr->name && strcmp(tr->name, name) == 0)
9292 			goto out_unlock;
9293 	}
9294 
9295 	tr = trace_array_create(name);
9296 
9297 	if (IS_ERR(tr))
9298 		tr = NULL;
9299 out_unlock:
9300 	if (tr)
9301 		tr->ref++;
9302 
9303 	mutex_unlock(&trace_types_lock);
9304 	mutex_unlock(&event_mutex);
9305 	return tr;
9306 }
9307 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9308 
9309 static int __remove_instance(struct trace_array *tr)
9310 {
9311 	int i;
9312 
9313 	/* Reference counter for a newly created trace array = 1. */
9314 	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9315 		return -EBUSY;
9316 
9317 	list_del(&tr->list);
9318 
9319 	/* Disable all the flags that were enabled coming in */
9320 	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9321 		if ((1 << i) & ZEROED_TRACE_FLAGS)
9322 			set_tracer_flag(tr, 1 << i, 0);
9323 	}
9324 
9325 	tracing_set_nop(tr);
9326 	clear_ftrace_function_probes(tr);
9327 	event_trace_del_tracer(tr);
9328 	ftrace_clear_pids(tr);
9329 	ftrace_destroy_function_files(tr);
9330 	tracefs_remove(tr->dir);
9331 	free_percpu(tr->last_func_repeats);
9332 	free_trace_buffers(tr);
9333 
9334 	for (i = 0; i < tr->nr_topts; i++) {
9335 		kfree(tr->topts[i].topts);
9336 	}
9337 	kfree(tr->topts);
9338 
9339 	free_cpumask_var(tr->tracing_cpumask);
9340 	kfree(tr->name);
9341 	kfree(tr);
9342 
9343 	return 0;
9344 }
9345 
9346 int trace_array_destroy(struct trace_array *this_tr)
9347 {
9348 	struct trace_array *tr;
9349 	int ret;
9350 
9351 	if (!this_tr)
9352 		return -EINVAL;
9353 
9354 	mutex_lock(&event_mutex);
9355 	mutex_lock(&trace_types_lock);
9356 
9357 	ret = -ENODEV;
9358 
9359 	/* Make sure the trace array exists before destroying it. */
9360 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9361 		if (tr == this_tr) {
9362 			ret = __remove_instance(tr);
9363 			break;
9364 		}
9365 	}
9366 
9367 	mutex_unlock(&trace_types_lock);
9368 	mutex_unlock(&event_mutex);
9369 
9370 	return ret;
9371 }
9372 EXPORT_SYMBOL_GPL(trace_array_destroy);
9373 
9374 static int instance_rmdir(const char *name)
9375 {
9376 	struct trace_array *tr;
9377 	int ret;
9378 
9379 	mutex_lock(&event_mutex);
9380 	mutex_lock(&trace_types_lock);
9381 
9382 	ret = -ENODEV;
9383 	tr = trace_array_find(name);
9384 	if (tr)
9385 		ret = __remove_instance(tr);
9386 
9387 	mutex_unlock(&trace_types_lock);
9388 	mutex_unlock(&event_mutex);
9389 
9390 	return ret;
9391 }
9392 
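/*
 * Create the "instances" directory and populate directories for any
 * named trace arrays that were created before tracefs was ready (such
 * as instances set up by boot-time code).
 */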
9393 static __init void create_trace_instances(struct dentry *d_tracer)
9394 {
9395 	struct trace_array *tr;
9396 
9397 	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9398 							 instance_mkdir,
9399 							 instance_rmdir);
9400 	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9401 		return;
9402 
9403 	mutex_lock(&event_mutex);
9404 	mutex_lock(&trace_types_lock);
9405 
9406 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9407 		if (!tr->name)
9408 			continue;
9409 		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9410 			     "Failed to create instance directory\n"))
9411 			break;
9412 	}
9413 
9414 	mutex_unlock(&trace_types_lock);
9415 	mutex_unlock(&event_mutex);
9416 }
9417 
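/*
 * Create the standard set of tracefs control and output files for a
 * trace array: trace, trace_pipe, buffer_size_kb, trace_options,
 * per_cpu directories, etc.
 */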
9418 static void
9419 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9420 {
9421 	struct trace_event_file *file;
9422 	int cpu;
9423 
9424 	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9425 			tr, &show_traces_fops);
9426 
9427 	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9428 			tr, &set_tracer_fops);
9429 
9430 	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9431 			  tr, &tracing_cpumask_fops);
9432 
9433 	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9434 			  tr, &tracing_iter_fops);
9435 
9436 	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9437 			  tr, &tracing_fops);
9438 
9439 	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9440 			  tr, &tracing_pipe_fops);
9441 
9442 	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9443 			  tr, &tracing_entries_fops);
9444 
9445 	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9446 			  tr, &tracing_total_entries_fops);
9447 
9448 	trace_create_file("free_buffer", 0200, d_tracer,
9449 			  tr, &tracing_free_buffer_fops);
9450 
9451 	trace_create_file("trace_marker", 0220, d_tracer,
9452 			  tr, &tracing_mark_fops);
9453 
9454 	file = __find_event_file(tr, "ftrace", "print");
9455 	if (file && file->dir)
9456 		trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9457 				  file, &event_trigger_fops);
9458 	tr->trace_marker_file = file;
9459 
9460 	trace_create_file("trace_marker_raw", 0220, d_tracer,
9461 			  tr, &tracing_mark_raw_fops);
9462 
9463 	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9464 			  &trace_clock_fops);
9465 
9466 	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9467 			  tr, &rb_simple_fops);
9468 
9469 	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9470 			  &trace_time_stamp_mode_fops);
9471 
9472 	tr->buffer_percent = 50;
9473 
9474 	trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9475 			tr, &buffer_percent_fops);
9476 
9477 	create_trace_options_dir(tr);
9478 
9479 	trace_create_maxlat_file(tr, d_tracer);
9480 
9481 	if (ftrace_create_function_files(tr, d_tracer))
9482 		MEM_FAIL(1, "Could not allocate function filter files");
9483 
9484 #ifdef CONFIG_TRACER_SNAPSHOT
9485 	trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9486 			  tr, &snapshot_fops);
9487 #endif
9488 
9489 	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9490 			  tr, &tracing_err_log_fops);
9491 
9492 	for_each_tracing_cpu(cpu)
9493 		tracing_init_tracefs_percpu(tr, cpu);
9494 
9495 	ftrace_init_tracefs(tr, d_tracer);
9496 }
9497 
9498 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9499 {
9500 	struct vfsmount *mnt;
9501 	struct file_system_type *type;
9502 
9503 	/*
9504 	 * To maintain backward compatibility for tools that mount
9505 	 * debugfs to get to the tracing facility, tracefs is automatically
9506 	 * mounted to the debugfs/tracing directory.
9507 	 */
9508 	type = get_fs_type("tracefs");
9509 	if (!type)
9510 		return NULL;
9511 	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9512 	put_filesystem(type);
9513 	if (IS_ERR(mnt))
9514 		return NULL;
9515 	mntget(mnt);
9516 
9517 	return mnt;
9518 }
9519 
9520 /**
9521  * tracing_init_dentry - initialize top level trace array
9522  *
9523  * This is called when creating files or directories in the tracing
9524  * directory. It is called via fs_initcall() by the boot up code and
9525  * returns zero on success or a negative error code on failure.
9526  */
9527 int tracing_init_dentry(void)
9528 {
9529 	struct trace_array *tr = &global_trace;
9530 
9531 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9532 		pr_warn("Tracing disabled due to lockdown\n");
9533 		return -EPERM;
9534 	}
9535 
9536 	/* The top level trace array uses NULL as parent */
9537 	if (tr->dir)
9538 		return 0;
9539 
9540 	if (WARN_ON(!tracefs_initialized()))
9541 		return -ENODEV;
9542 
9543 	/*
9544 	 * As there may still be users that expect the tracing
9545 	 * files to exist in debugfs/tracing, we must automount
9546 	 * the tracefs file system there, so older tools still
9547 	 * work with the newer kernel.
9548 	 */
9549 	tr->dir = debugfs_create_automount("tracing", NULL,
9550 					   trace_automount, NULL);
9551 
9552 	return 0;
9553 }
9554 
9555 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9556 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9557 
9558 static struct workqueue_struct *eval_map_wq __initdata;
9559 static struct work_struct eval_map_work __initdata;
9560 
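/*
 * Converting the compiled-in eval (enum) maps may take a while when there
 * are many of them, so the work is pushed off to a workqueue rather than
 * being done inline here; trace_eval_sync() below makes sure it has
 * completed before late init finishes.
 */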
9561 static void __init eval_map_work_func(struct work_struct *work)
9562 {
9563 	int len;
9564 
9565 	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9566 	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9567 }
9568 
9569 static int __init trace_eval_init(void)
9570 {
9571 	INIT_WORK(&eval_map_work, eval_map_work_func);
9572 
9573 	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9574 	if (!eval_map_wq) {
9575 		pr_err("Unable to allocate eval_map_wq\n");
9576 		/* Fall back to building the eval maps synchronously */
9577 		eval_map_work_func(&eval_map_work);
9578 		return -ENOMEM;
9579 	}
9580 
9581 	queue_work(eval_map_wq, &eval_map_work);
9582 	return 0;
9583 }
9584 
9585 static int __init trace_eval_sync(void)
9586 {
9587 	/* Make sure the eval map updates are finished */
9588 	if (eval_map_wq)
9589 		destroy_workqueue(eval_map_wq);
9590 	return 0;
9591 }
9592 
9593 late_initcall_sync(trace_eval_sync);
9594 
9595 
9596 #ifdef CONFIG_MODULES
9597 static void trace_module_add_evals(struct module *mod)
9598 {
9599 	if (!mod->num_trace_evals)
9600 		return;
9601 
9602 	/*
9603 	 * Modules with bad taint do not have events created; do
9604 	 * not bother with their eval maps either.
9605 	 */
9606 	if (trace_module_has_bad_taint(mod))
9607 		return;
9608 
9609 	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9610 }
9611 
9612 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9613 static void trace_module_remove_evals(struct module *mod)
9614 {
9615 	union trace_eval_map_item *map;
9616 	union trace_eval_map_item **last = &trace_eval_maps;
9617 
9618 	if (!mod->num_trace_evals)
9619 		return;
9620 
9621 	mutex_lock(&trace_eval_mutex);
9622 
9623 	map = trace_eval_maps;
9624 
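	/*
	 * Each saved map is an array with a "head" item in front and a
	 * "tail" item at the end that links to the next array. Walk the
	 * tail links until we find the array belonging to @mod, keeping
	 * @last pointing at the link that references it.
	 */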
9625 	while (map) {
9626 		if (map->head.mod == mod)
9627 			break;
9628 		map = trace_eval_jmp_to_tail(map);
9629 		last = &map->tail.next;
9630 		map = map->tail.next;
9631 	}
9632 	if (!map)
9633 		goto out;
9634 
9635 	*last = trace_eval_jmp_to_tail(map)->tail.next;
9636 	kfree(map);
9637  out:
9638 	mutex_unlock(&trace_eval_mutex);
9639 }
9640 #else
9641 static inline void trace_module_remove_evals(struct module *mod) { }
9642 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9643 
9644 static int trace_module_notify(struct notifier_block *self,
9645 			       unsigned long val, void *data)
9646 {
9647 	struct module *mod = data;
9648 
9649 	switch (val) {
9650 	case MODULE_STATE_COMING:
9651 		trace_module_add_evals(mod);
9652 		break;
9653 	case MODULE_STATE_GOING:
9654 		trace_module_remove_evals(mod);
9655 		break;
9656 	}
9657 
9658 	return NOTIFY_OK;
9659 }
9660 
9661 static struct notifier_block trace_module_nb = {
9662 	.notifier_call = trace_module_notify,
9663 	.priority = 0,
9664 };
9665 #endif /* CONFIG_MODULES */
9666 
9667 static __init int tracer_init_tracefs(void)
9668 {
9669 	int ret;
9670 
9671 	trace_access_lock_init();
9672 
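	/*
	 * If the top level tracing directory could not be set up (tracefs
	 * unavailable or tracing locked down), there is nothing to create.
	 */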
9673 	ret = tracing_init_dentry();
9674 	if (ret)
9675 		return 0;
9676 
9677 	event_trace_init();
9678 
9679 	init_tracer_tracefs(&global_trace, NULL);
9680 	ftrace_init_tracefs_toplevel(&global_trace, NULL);
9681 
9682 	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9683 			&global_trace, &tracing_thresh_fops);
9684 
9685 	trace_create_file("README", TRACE_MODE_READ, NULL,
9686 			NULL, &tracing_readme_fops);
9687 
9688 	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9689 			NULL, &tracing_saved_cmdlines_fops);
9690 
9691 	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9692 			  NULL, &tracing_saved_cmdlines_size_fops);
9693 
9694 	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9695 			NULL, &tracing_saved_tgids_fops);
9696 
9697 	trace_eval_init();
9698 
9699 	trace_create_eval_file(NULL);
9700 
9701 #ifdef CONFIG_MODULES
9702 	register_module_notifier(&trace_module_nb);
9703 #endif
9704 
9705 #ifdef CONFIG_DYNAMIC_FTRACE
9706 	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9707 			NULL, &tracing_dyn_info_fops);
9708 #endif
9709 
9710 	create_trace_instances(NULL);
9711 
9712 	update_tracer_options(&global_trace);
9713 
9714 	return 0;
9715 }
9716 
9717 fs_initcall(tracer_init_tracefs);
9718 
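/*
 * Dump the ring buffer to the console on panic or oops when the user has
 * asked for it, e.g. via the "ftrace_dump_on_oops" kernel command line
 * option or the kernel.ftrace_dump_on_oops sysctl.
 */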
9719 static int trace_panic_handler(struct notifier_block *this,
9720 			       unsigned long event, void *unused)
9721 {
9722 	if (ftrace_dump_on_oops)
9723 		ftrace_dump(ftrace_dump_on_oops);
9724 	return NOTIFY_OK;
9725 }
9726 
9727 static struct notifier_block trace_panic_notifier = {
9728 	.notifier_call  = trace_panic_handler,
9729 	.next           = NULL,
9730 	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
9731 };
9732 
9733 static int trace_die_handler(struct notifier_block *self,
9734 			     unsigned long val,
9735 			     void *data)
9736 {
9737 	switch (val) {
9738 	case DIE_OOPS:
9739 		if (ftrace_dump_on_oops)
9740 			ftrace_dump(ftrace_dump_on_oops);
9741 		break;
9742 	default:
9743 		break;
9744 	}
9745 	return NOTIFY_OK;
9746 }
9747 
9748 static struct notifier_block trace_die_notifier = {
9749 	.notifier_call = trace_die_handler,
9750 	.priority = 200
9751 };
9752 
9753 /*
9754  * The printk buffer maxes out at 1024 bytes; we really don't need it that big.
9755  * Nothing should be printing 1000 characters anyway.
9756  */
9757 #define TRACE_MAX_PRINT		1000
9758 
9759 /*
9760  * Define here KERN_TRACE so that we have one place to modify
9761  * it if we decide to change what log level the ftrace dump
9762  * should be at.
9763  */
9764 #define KERN_TRACE		KERN_EMERG
9765 
9766 void
9767 trace_printk_seq(struct trace_seq *s)
9768 {
9769 	/* Probably should print a warning here. */
9770 	if (s->seq.len >= TRACE_MAX_PRINT)
9771 		s->seq.len = TRACE_MAX_PRINT;
9772 
9773 	/*
9774 	 * More paranoid code. Although the buffer size is set to
9775 	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9776 	 * an extra layer of protection.
9777 	 */
9778 	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9779 		s->seq.len = s->seq.size - 1;
9780 
9781 	/* should already be nul-terminated, but we are paranoid. */
9782 	s->buffer[s->seq.len] = 0;
9783 
9784 	printk(KERN_TRACE "%s", s->buffer);
9785 
9786 	trace_seq_init(s);
9787 }
9788 
9789 void trace_init_global_iter(struct trace_iterator *iter)
9790 {
9791 	iter->tr = &global_trace;
9792 	iter->trace = iter->tr->current_trace;
9793 	iter->cpu_file = RING_BUFFER_ALL_CPUS;
9794 	iter->array_buffer = &global_trace.array_buffer;
9795 
9796 	if (iter->trace && iter->trace->open)
9797 		iter->trace->open(iter);
9798 
9799 	/* Annotate start of buffers if we had overruns */
9800 	if (ring_buffer_overruns(iter->array_buffer->buffer))
9801 		iter->iter_flags |= TRACE_FILE_ANNOTATE;
9802 
9803 	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
9804 	if (trace_clocks[iter->tr->clock_id].in_ns)
9805 		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9806 }
9807 
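/*
 * ftrace_dump - dump the global ring buffer to the console
 * @oops_dump_mode: DUMP_ALL dumps the buffers of every CPU, DUMP_ORIG only
 *	the CPU that requested the dump.
 *
 * Used by the panic/die notifiers above and by sysrq-z. Tracing is turned
 * off first so the output is not polluted by events generated while dumping.
 */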
9808 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9809 {
9810 	/* use static because iter can be a bit big for the stack */
9811 	static struct trace_iterator iter;
9812 	static atomic_t dump_running;
9813 	struct trace_array *tr = &global_trace;
9814 	unsigned int old_userobj;
9815 	unsigned long flags;
9816 	int cnt = 0, cpu;
9817 
9818 	/* Only allow one dump user at a time. */
9819 	if (atomic_inc_return(&dump_running) != 1) {
9820 		atomic_dec(&dump_running);
9821 		return;
9822 	}
9823 
9824 	/*
9825 	 * Always turn off tracing when we dump.
9826 	 * We don't need to show trace output of what happens
9827 	 * between multiple crashes.
9828 	 *
9829 	 * If the user does a sysrq-z, then they can re-enable
9830 	 * tracing with echo 1 > tracing_on.
9831 	 */
9832 	tracing_off();
9833 
9834 	local_irq_save(flags);
9835 
9836 	/* Simulate the iterator */
9837 	trace_init_global_iter(&iter);
9838 	/* Can not use kmalloc for iter.temp and iter.fmt */
9839 	iter.temp = static_temp_buf;
9840 	iter.temp_size = STATIC_TEMP_BUF_SIZE;
9841 	iter.fmt = static_fmt_buf;
9842 	iter.fmt_size = STATIC_FMT_BUF_SIZE;
9843 
9844 	for_each_tracing_cpu(cpu) {
9845 		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9846 	}
9847 
9848 	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9849 
9850 	/* don't look at user memory in panic mode */
9851 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9852 
9853 	switch (oops_dump_mode) {
9854 	case DUMP_ALL:
9855 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
9856 		break;
9857 	case DUMP_ORIG:
9858 		iter.cpu_file = raw_smp_processor_id();
9859 		break;
9860 	case DUMP_NONE:
9861 		goto out_enable;
9862 	default:
9863 		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9864 		iter.cpu_file = RING_BUFFER_ALL_CPUS;
9865 	}
9866 
9867 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
9868 
9869 	/* Did function tracer already get disabled? */
9870 	if (ftrace_is_dead()) {
9871 		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9872 		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
9873 	}
9874 
9875 	/*
9876 	 * We need to stop all tracing on all CPUs to read
9877 	 * the next buffer. This is a bit expensive, but is
9878 	 * not done often. We fill in all that we can read,
9879 	 * and then release the locks again.
9880 	 */
9881 
9882 	while (!trace_empty(&iter)) {
9883 
9884 		if (!cnt)
9885 			printk(KERN_TRACE "---------------------------------\n");
9886 
9887 		cnt++;
9888 
9889 		trace_iterator_reset(&iter);
9890 		iter.iter_flags |= TRACE_FILE_LAT_FMT;
9891 
9892 		if (trace_find_next_entry_inc(&iter) != NULL) {
9893 			int ret;
9894 
9895 			ret = print_trace_line(&iter);
9896 			if (ret != TRACE_TYPE_NO_CONSUME)
9897 				trace_consume(&iter);
9898 		}
9899 		touch_nmi_watchdog();
9900 
9901 		trace_printk_seq(&iter.seq);
9902 	}
9903 
9904 	if (!cnt)
9905 		printk(KERN_TRACE "   (ftrace buffer empty)\n");
9906 	else
9907 		printk(KERN_TRACE "---------------------------------\n");
9908 
9909  out_enable:
9910 	tr->trace_flags |= old_userobj;
9911 
9912 	for_each_tracing_cpu(cpu) {
9913 		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9914 	}
9915 	atomic_dec(&dump_running);
9916 	local_irq_restore(flags);
9917 }
9918 EXPORT_SYMBOL_GPL(ftrace_dump);
9919 
9920 #define WRITE_BUFSIZE  4096
9921 
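/*
 * trace_parse_run_command - run @createfn on each command line in @buffer
 *
 * The user buffer is copied in chunks of at most WRITE_BUFSIZE, split on
 * newlines, '#' comments are stripped, and @createfn is called once per
 * complete line. Used by the dynamic event interfaces, e.g. writing
 * "p:myprobe do_sys_open" to kprobe_events.
 */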
9922 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9923 				size_t count, loff_t *ppos,
9924 				int (*createfn)(const char *))
9925 {
9926 	char *kbuf, *buf, *tmp;
9927 	int ret = 0;
9928 	size_t done = 0;
9929 	size_t size;
9930 
9931 	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9932 	if (!kbuf)
9933 		return -ENOMEM;
9934 
9935 	while (done < count) {
9936 		size = count - done;
9937 
9938 		if (size >= WRITE_BUFSIZE)
9939 			size = WRITE_BUFSIZE - 1;
9940 
9941 		if (copy_from_user(kbuf, buffer + done, size)) {
9942 			ret = -EFAULT;
9943 			goto out;
9944 		}
9945 		kbuf[size] = '\0';
9946 		buf = kbuf;
9947 		do {
9948 			tmp = strchr(buf, '\n');
9949 			if (tmp) {
9950 				*tmp = '\0';
9951 				size = tmp - buf + 1;
9952 			} else {
9953 				size = strlen(buf);
9954 				if (done + size < count) {
9955 					if (buf != kbuf)
9956 						break;
9957 					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9958 					pr_warn("Line length is too long: Should be less than %d\n",
9959 						WRITE_BUFSIZE - 2);
9960 					ret = -EINVAL;
9961 					goto out;
9962 				}
9963 			}
9964 			done += size;
9965 
9966 			/* Remove comments */
9967 			tmp = strchr(buf, '#');
9968 
9969 			if (tmp)
9970 				*tmp = '\0';
9971 
9972 			ret = createfn(buf);
9973 			if (ret)
9974 				goto out;
9975 			buf += size;
9976 
9977 		} while (done < count);
9978 	}
9979 	ret = done;
9980 
9981 out:
9982 	kfree(kbuf);
9983 
9984 	return ret;
9985 }
9986 
9987 __init static int tracer_alloc_buffers(void)
9988 {
9989 	int ring_buf_size;
9990 	int ret = -ENOMEM;
9991 
9992 
9993 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
9994 		pr_warn("Tracing disabled due to lockdown\n");
9995 		return -EPERM;
9996 	}
9997 
9998 	/*
9999 	 * Make sure we don't accidentally add more trace options
10000 	 * than we have bits for.
10001 	 */
10002 	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10003 
10004 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10005 		goto out;
10006 
10007 	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10008 		goto out_free_buffer_mask;
10009 
10010 	/* Only allocate trace_printk buffers if a trace_printk exists */
10011 	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10012 		/* Must be called before global_trace.buffer is allocated */
10013 		trace_printk_init_buffers();
10014 
10015 	/* To save memory, keep the ring buffer size to its minimum */
10016 	if (ring_buffer_expanded)
10017 		ring_buf_size = trace_buf_size;
10018 	else
10019 		ring_buf_size = 1;
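	/*
	 * The buffers are expanded to trace_buf_size the first time tracing
	 * is actually used (see tracing_update_buffers()).
	 */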
10020 
10021 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10022 	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10023 
10024 	raw_spin_lock_init(&global_trace.start_lock);
10025 
10026 	/*
10027 	 * The prepare callback allocates some memory for the ring buffer. We
10028 	 * don't free the buffer if the CPU goes down. If we were to free
10029 	 * the buffer, then the user would lose any trace that was in the
10030 	 * buffer. The memory will be removed once the "instance" is removed.
10031 	 */
10032 	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10033 				      "trace/RB:prepare", trace_rb_cpu_prepare,
10034 				      NULL);
10035 	if (ret < 0)
10036 		goto out_free_cpumask;
10037 	/* Used for event triggers */
10038 	ret = -ENOMEM;
10039 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10040 	if (!temp_buffer)
10041 		goto out_rm_hp_state;
10042 
10043 	if (trace_create_savedcmd() < 0)
10044 		goto out_free_temp_buffer;
10045 
10046 	/* TODO: make the number of buffers hot pluggable with CPUs */
10047 	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10048 		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10049 		goto out_free_savedcmd;
10050 	}
10051 
10052 	if (global_trace.buffer_disabled)
10053 		tracing_off();
10054 
10055 	if (trace_boot_clock) {
10056 		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10057 		if (ret < 0)
10058 			pr_warn("Trace clock %s not defined, going back to default\n",
10059 				trace_boot_clock);
10060 	}
10061 
10062 	/*
10063 	 * register_tracer() might reference current_trace, so it
10064 	 * needs to be set before we register anything. This is
10065 	 * just a bootstrap of current_trace anyway.
10066 	 */
10067 	global_trace.current_trace = &nop_trace;
10068 
10069 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10070 
10071 	ftrace_init_global_array_ops(&global_trace);
10072 
10073 	init_trace_flags_index(&global_trace);
10074 
10075 	register_tracer(&nop_trace);
10076 
10077 	/* Function tracing may start here (via kernel command line) */
10078 	init_function_trace();
10079 
10080 	/* All seems OK, enable tracing */
10081 	tracing_disabled = 0;
10082 
10083 	atomic_notifier_chain_register(&panic_notifier_list,
10084 				       &trace_panic_notifier);
10085 
10086 	register_die_notifier(&trace_die_notifier);
10087 
10088 	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10089 
10090 	INIT_LIST_HEAD(&global_trace.systems);
10091 	INIT_LIST_HEAD(&global_trace.events);
10092 	INIT_LIST_HEAD(&global_trace.hist_vars);
10093 	INIT_LIST_HEAD(&global_trace.err_log);
10094 	list_add(&global_trace.list, &ftrace_trace_arrays);
10095 
10096 	apply_trace_boot_options();
10097 
10098 	register_snapshot_cmd();
10099 
10100 	test_can_verify();
10101 
10102 	return 0;
10103 
10104 out_free_savedcmd:
10105 	free_saved_cmdlines_buffer(savedcmd);
10106 out_free_temp_buffer:
10107 	ring_buffer_free(temp_buffer);
10108 out_rm_hp_state:
10109 	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10110 out_free_cpumask:
10111 	free_cpumask_var(global_trace.tracing_cpumask);
10112 out_free_buffer_mask:
10113 	free_cpumask_var(tracing_buffer_mask);
10114 out:
10115 	return ret;
10116 }
10117 
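/*
 * Called from start_kernel() early in boot, so the ring buffers exist
 * before trace_printk() or the boot-time tracers are used. trace_init()
 * below follows later in boot, once trace events can be initialized.
 */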
10118 void __init early_trace_init(void)
10119 {
10120 	if (tracepoint_printk) {
10121 		tracepoint_print_iter =
10122 			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10123 		if (MEM_FAIL(!tracepoint_print_iter,
10124 			     "Failed to allocate trace iterator\n"))
10125 			tracepoint_printk = 0;
10126 		else
10127 			static_key_enable(&tracepoint_printk_key.key);
10128 	}
10129 	tracer_alloc_buffers();
10130 }
10131 
10132 void __init trace_init(void)
10133 {
10134 	trace_event_init();
10135 }
10136 
10137 __init static void clear_boot_tracer(void)
10138 {
10139 	/*
10140 	 * The name of the default boot-up tracer lives in an init section
10141 	 * and will be freed after boot. This function is called at late
10142 	 * initcall time: if the boot tracer was never registered, clear it
10143 	 * out so that a later registration does not access the memory that
10144 	 * is about to be freed.
10145 	 */
10146 	if (!default_bootup_tracer)
10147 		return;
10148 
10149 	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10150 	       default_bootup_tracer);
10151 	default_bootup_tracer = NULL;
10152 }
10153 
10154 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10155 __init static void tracing_set_default_clock(void)
10156 {
10157 	/* sched_clock_stable() is determined in late_initcall */
10158 	if (!trace_boot_clock && !sched_clock_stable()) {
10159 		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10160 			pr_warn("Can not set tracing clock due to lockdown\n");
10161 			return;
10162 		}
10163 
10164 		printk(KERN_WARNING
10165 		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10166 		       "If you want to keep using the local clock, then add:\n"
10167 		       "  \"trace_clock=local\"\n"
10168 		       "on the kernel command line\n");
10169 		tracing_set_clock(&global_trace, "global");
10170 	}
10171 }
10172 #else
10173 static inline void tracing_set_default_clock(void) { }
10174 #endif
10175 
10176 __init static int late_trace_init(void)
10177 {
10178 	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10179 		static_key_disable(&tracepoint_printk_key.key);
10180 		tracepoint_printk = 0;
10181 	}
10182 
10183 	tracing_set_default_clock();
10184 	clear_boot_tracer();
10185 	return 0;
10186 }
10187 
10188 late_initcall_sync(late_trace_init);
10189