xref: /linux/kernel/trace/trace_uprobe.c (revision 3bdab16c55f57a24245c97d707241dd9b48d1a91)
// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
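
/*
 * Layout sketch (illustrative): an entry probe records a single
 * vaddr (the probed instruction pointer) while a return probe
 * records two (the probed function address and the return site).
 * SIZEOF_TRACE_ENTRY() is therefore the header plus one or two
 * unsigned longs, and DATAOF_TRACE_ENTRY() points at the fetched
 * argument data that starts immediately after them.
 */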

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
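
/*
 * Worked example (assuming a downward-growing stack, the
 * !CONFIG_STACK_GROWSUP case): fetching "$stack2" reads the word at
 * sp + 2 * sizeof(long), i.e. the third word starting at the user
 * stack pointer. On a CONFIG_STACK_GROWSUP architecture the offset
 * is subtracted instead.
 */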

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
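
/*
 * Note: make_data_loc() packs the resulting length and the offset of
 * the string data relative to @base into the u32 at @dest, following
 * the usual __data_loc convention for dynamically sized trace fields
 * (length in the upper 16 bits, offset in the lower 16).
 */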

/* Return the length of the string, including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
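
/*
 * Worked example (illustrative numbers): with the probe at file
 * offset 0x4710 and a breakpoint hit at vaddr 0x7f0000004710,
 * base_addr is 0x7f0000000000, so an "@+0x10" fetcharg resolves to
 * vaddr 0x7f0000000010.
 */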

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		(!system || strcmp(tu->tp.call.class->system, system) == 0);
}

/*
 * Allocate a new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !group)
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	if (!tu)
		return;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	path_put(&tu->path);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	dyn_event_remove(&tu->devent);
	free_trace_uprobe(tu);
	return 0;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if
 * the inode and offset match, the reference counter offset *must*
 * match as well. There is one exception: if the user is replacing an
 * old trace_uprobe with a new one (same group/event), we allow the
 * same uprobe with a new reference counter, as long as the new one
 * does not conflict with any other existing ones.
 */
static struct trace_uprobe *find_old_trace_uprobe(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp, *old = NULL;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	old = find_probe_event(trace_event_name(&new->tp.call),
				new->tp.call.class->system);

	for_each_trace_uprobe(tmp, pos) {
		if ((old ? old != tmp : true) &&
		    new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return ERR_PTR(-EINVAL);
		}
	}
	return old;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	/* register as an event */
	old_tu = find_old_trace_uprobe(tu);
	if (IS_ERR(old_tu)) {
		ret = PTR_ERR(old_tu);
		goto end;
	}

	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
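/*
 * For example (paths and offsets below are illustrative only):
 *
 *   echo 'p:bash_main /bin/bash:0x4245c0' > uprobe_events
 *   echo 'r:bash_main_ret /bin/bash:0x4245c0' >> uprobe_events
 *   echo 'p:sdt_ev /usr/lib/libfoo.so:0x1234(0x5678)' >> uprobe_events
 *   echo '-:bash_main' >> uprobe_events
 *
 * The optional "(REF_CTR_OFFSET)" suffix after the probe offset names
 * an SDT-style reference counter (semaphore) updated while the probe
 * is armed.
 */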
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	/* argc must be >= 1 */
	if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p' || argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
			trace_event_name(&tu->tp.call), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}
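
/*
 * A registered probe is listed in uprobe_events roughly as follows
 * (values illustrative):
 *
 *   p:uprobes/bash_main /bin/bash:0x00000000004245c0 arg1=%di
 *
 * with the "(0x...)" reference counter suffix and any fetch args
 * appended when present.
 */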

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}
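
/*
 * Each uprobe_profile line pairs the probed file and event name with
 * the hit count, e.g. (illustrative):
 *
 *   /bin/bash bash_main                                          127
 */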

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for the fastest access, but since we might
	 * migrate between CPUs, the mutex ensures we have sole access to
	 * the buffer.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = &tu->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_event_name(&tu->tp.call),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}
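
/*
 * Rendered output looks roughly like (illustrative):
 *
 *   bash_main: (0x4245c0) arg1=0x1
 *   bash_main_ret: (0x41d340 <- 0x4245c0) arg1=0x1
 *
 * i.e. return probes print "(return site <- probed func)" while entry
 * probes print just the probed address, followed by the decoded args.
 */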

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		if (tu->tp.flags & TP_FLAG_PROFILE)
			return -EINTR;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else {
		if (tu->tp.flags & TP_FLAG_TRACE)
			return -EINTR;

		tu->tp.flags |= TP_FLAG_PROFILE;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);
	if (tu->ref_ctr_offset) {
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	} else {
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	}

	if (ret)
		goto err_buffer;

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file) {
		list_del(&link->list);
		kfree(link);
		tu->tp.flags &= ~TP_FLAG_TRACE;
	} else {
		tu->tp.flags &= ~TP_FLAG_PROFILE;
	}
	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_rcu();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->inode = NULL;
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;
	int err;

	write_lock(&tu->filter.rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	err = 0;
	if (!done) {
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err)
			uprobe_perf_close(tu, event);
	}
	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = event->tp_event->data;
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct trace_event_call *call = &tu->tp.call;
	int ret = 0;

	init_trace_event_call(tu, call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}

	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * a duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu, &tu->tp.call);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return &tu->tp.call;
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = container_of(event_call, struct trace_uprobe, tp.call);

	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);