xref: /linux/arch/powerpc/kernel/trace/ftrace_64_pg.c (revision 42874e4eb35bdfc54f8514685e50434098ba4f6c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Code for replacing ftrace calls with jumps.
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6  *
7  * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
8  *
9  * Added function graph tracer code, taken from x86 that was written
10  * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
11  *
12  */
13 
14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt
15 
16 #include <linux/spinlock.h>
17 #include <linux/hardirq.h>
18 #include <linux/uaccess.h>
19 #include <linux/module.h>
20 #include <linux/ftrace.h>
21 #include <linux/percpu.h>
22 #include <linux/init.h>
23 #include <linux/list.h>
24 
25 #include <asm/cacheflush.h>
26 #include <asm/code-patching.h>
27 #include <asm/ftrace.h>
28 #include <asm/syscall.h>
29 #include <asm/inst.h>
30 
/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define	NUM_FTRACE_TRAMPS	8
/* Addresses of known ftrace trampolines; a value of 0 marks a free slot. */
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
39 
40 static ppc_inst_t
41 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
42 {
43 	ppc_inst_t op;
44 
45 	addr = ppc_function_entry((void *)addr);
46 
47 	/* if (link) set op to 'bl' else 'b' */
48 	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
49 
50 	return op;
51 }
52 
53 static inline int
54 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
55 {
56 	ppc_inst_t replaced;
57 
58 	/*
59 	 * Note:
60 	 * We are paranoid about modifying text, as if a bug was to happen, it
61 	 * could cause us to read or write to someplace that could cause harm.
62 	 * Carefully read and modify the code with probe_kernel_*(), and make
63 	 * sure what we read is what we expected it to be before modifying it.
64 	 */
65 
66 	/* read the text we want to modify */
67 	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
68 		return -EFAULT;
69 
70 	/* Make sure it is what we expect it to be */
71 	if (!ppc_inst_equal(replaced, old)) {
72 		pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
73 		       ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
74 		return -EINVAL;
75 	}
76 
77 	/* replace the text with the new text */
78 	return patch_instruction((u32 *)ip, new);
79 }
80 
81 /*
82  * Helper functions that are the same for both PPC64 and PPC32.
83  */
/* Can a 24-bit relative branch at @ip reach the entry point of @addr? */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	unsigned long dest = ppc_function_entry((void *)addr);

	return is_offset_in_branch_range(dest - ip);
}
90 
91 static int is_bl_op(ppc_inst_t op)
92 {
93 	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
94 }
95 
96 static int is_b_op(ppc_inst_t op)
97 {
98 	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
99 }
100 
101 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
102 {
103 	int offset;
104 
105 	offset = PPC_LI(ppc_inst_val(op));
106 	/* make it signed */
107 	if (offset & 0x02000000)
108 		offset |= 0xfe000000;
109 
110 	return ip + (long)offset;
111 }
112 
113 #ifdef CONFIG_MODULES
/*
 * Turn the ftrace call site @rec->ip in a module (a 'bl' to a module
 * trampoline that leads to @addr) back into a no-op.
 *
 * Verifies the site still holds the expected 'bl', that the trampoline
 * really targets @addr, and sanity-checks the surrounding instructions
 * before patching anything.
 *
 * Returns 0 on success or a negative errno on any mismatch or fault.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Resolve the trampoline to its final destination */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Sanity-check the instruction preceding the 'bl' */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
			return -EFAULT;
		}

		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
		if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
		    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
			pr_err("Unexpected instruction %08lx around bl _mcount\n",
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		/*
		 * Check what is in the next instruction. We can see ld r2,40(r1), but
		 * on first pass after boot we will see mflr r0.
		 */
		if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
			pr_err("Fetching op failed.\n");
			return -EFAULT;
		}

		if (!ppc_inst_equal(op,  ppc_inst(PPC_INST_LD_TOC))) {
			pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
			       ppc_inst_as_ulong(op));
			return -EINVAL;
		}
	}

	/*
	 * When using -mprofile-kernel or PPC32 there is no load to jump over.
	 *
	 * Otherwise our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
		pop = ppc_inst(PPC_RAW_NOP());
	else
		pop = ppc_inst(PPC_RAW_BRANCH(8));	/* b +8 */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
210 #else
/* No modules configured: nothing to patch out, report success. */
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
215 #endif /* CONFIG_MODULES */
216 
217 static unsigned long find_ftrace_tramp(unsigned long ip)
218 {
219 	int i;
220 
221 	/*
222 	 * We have the compiler generated long_branch tramps at the end
223 	 * and we prefer those
224 	 */
225 	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
226 		if (!ftrace_tramps[i])
227 			continue;
228 		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
229 			return ftrace_tramps[i];
230 
231 	return 0;
232 }
233 
234 static int add_ftrace_tramp(unsigned long tramp)
235 {
236 	int i;
237 
238 	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
239 		if (!ftrace_tramps[i]) {
240 			ftrace_tramps[i] = tramp;
241 			return 0;
242 		}
243 
244 	return -1;
245 }
246 
247 /*
248  * If this is a compiler generated long_branch trampoline (essentially, a
249  * trampoline that has a branch to _mcount()), we re-write the branch to
250  * instead go to ftrace_[regs_]caller() and note down the location of this
251  * trampoline.
252  */
/* Returns 0 if @tramp is (now) a usable ftrace trampoline, -1 otherwise. */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (ftrace_tramps[i] == tramp)
			return 0;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	/* Only convert trampolines that branch straight to _mcount */
	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		ptr = ppc_global_function_entry((void *)ftrace_caller);

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	/* Remember it so later call sites can reuse this trampoline */
	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}
302 
/*
 * Nop out the ftrace call site @rec->ip in core kernel text. The site is a
 * 'bl' to a compiler-generated long_branch trampoline; the trampoline is
 * converted/recorded via setup_mcount_compiler_tramp() before the site
 * itself is patched to a nop.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
					(void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
341 
/*
 * Convert the ftrace call site @rec->ip (currently a call to @addr) into a
 * no-op. In-range sites are patched directly; out-of-range sites go through
 * the kernel-text or module trampoline paths.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_nop_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}
387 
388 #ifdef CONFIG_MODULES
389 /*
390  * Examine the existing instructions for __ftrace_make_call.
391  * They should effectively be a NOP, and follow formal constraints,
392  * depending on the ABI. Return false if they don't.
393  */
394 static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
395 {
396 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
397 		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
398 	else
399 		return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
400 		       ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
401 }
402 
403 static int
404 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
405 {
406 	ppc_inst_t op[2];
407 	void *ip = (void *)rec->ip;
408 	unsigned long entry, ptr, tramp;
409 	struct module *mod = rec->arch.mod;
410 
411 	/* read where this goes */
412 	if (copy_inst_from_kernel_nofault(op, ip))
413 		return -EFAULT;
414 
415 	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
416 	    copy_inst_from_kernel_nofault(op + 1, ip + 4))
417 		return -EFAULT;
418 
419 	if (!expected_nop_sequence(ip, op[0], op[1])) {
420 		pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
421 		       ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
422 		return -EINVAL;
423 	}
424 
425 	/* If we never set up ftrace trampoline(s), then bail */
426 	if (!mod->arch.tramp ||
427 	    (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
428 		pr_err("No ftrace trampoline\n");
429 		return -EINVAL;
430 	}
431 
432 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
433 		tramp = mod->arch.tramp_regs;
434 	else
435 		tramp = mod->arch.tramp;
436 
437 	if (module_trampoline_target(mod, tramp, &ptr)) {
438 		pr_err("Failed to get trampoline target\n");
439 		return -EFAULT;
440 	}
441 
442 	pr_devel("trampoline target %lx", ptr);
443 
444 	entry = ppc_global_function_entry((void *)addr);
445 	/* This should match what was called */
446 	if (ptr != entry) {
447 		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
448 		return -EINVAL;
449 	}
450 
451 	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
452 		pr_err("REL24 out of range!\n");
453 		return -EINVAL;
454 	}
455 
456 	return 0;
457 }
458 #else
/* No modules configured: nothing to patch, report success. */
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return 0;
}
463 #endif /* CONFIG_MODULES */
464 
/*
 * Patch the kernel-text ftrace call site @rec->ip (currently a nop) into a
 * 'bl' to a reachable ftrace trampoline. @addr must resolve to
 * ftrace_caller or, with DYNAMIC_FTRACE_WITH_REGS, ftrace_regs_caller.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);

	if (ptr != entry) {
		pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
		return -EINVAL;
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %08lx\n",
		       ip, ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* Branch to whichever recorded trampoline is within REL24 range */
	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}
508 
/*
 * Convert the nop at @rec->ip into a call to @addr, patching directly when
 * @addr is in branch range and going through the kernel-text or module
 * trampoline paths otherwise.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		return __ftrace_make_call_kernel(rec, addr);
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}
543 
544 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
545 #ifdef CONFIG_MODULES
/*
 * Re-point the module call site @rec->ip from @old_addr to @addr (used to
 * switch between ftrace_caller and ftrace_regs_caller). Verifies the
 * current 'bl' really leads to @old_addr before patching in either a
 * direct branch (when @addr is in range) or a branch to the appropriate
 * module trampoline.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: go via the module's (regs) trampoline instead */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
632 #else
/* No modules configured: nothing to re-point, report success. */
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	return 0;
}
637 #endif
638 
/*
 * Change the ftrace call site @rec->ip from calling @old_addr to calling
 * @addr. In-range sites are patched directly; kernel-text sites already go
 * via the regs trampoline, so only module sites need the trampoline path.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more that 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	} else if (!IS_ENABLED(CONFIG_MODULES)) {
		/* We should not get here without modules */
		return -EINVAL;
	}

	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
}
676 #endif
677 
678 int ftrace_update_ftrace_func(ftrace_func_t func)
679 {
680 	unsigned long ip = (unsigned long)(&ftrace_call);
681 	ppc_inst_t old, new;
682 	int ret;
683 
684 	old = ppc_inst_read((u32 *)&ftrace_call);
685 	new = ftrace_call_replace(ip, (unsigned long)func, 1);
686 	ret = ftrace_modify_code(ip, old, new);
687 
688 	/* Also update the regs callback function */
689 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
690 		ip = (unsigned long)(&ftrace_regs_call);
691 		old = ppc_inst_read((u32 *)&ftrace_regs_call);
692 		new = ftrace_call_replace(ip, (unsigned long)func, 1);
693 		ret = ftrace_modify_code(ip, old, new);
694 	}
695 
696 	return ret;
697 }
698 
699 /*
700  * Use the default ftrace_modify_all_code, but without
701  * stop_machine().
702  */
/* Run the generic update sequence directly in the caller's context. */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
707 
708 #ifdef CONFIG_PPC64
709 #define PACATOC offsetof(struct paca_struct, kernel_toc)
710 
711 extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
712 
713 void ftrace_free_init_tramp(void)
714 {
715 	int i;
716 
717 	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
718 		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
719 			ftrace_tramps[i] = 0;
720 			return;
721 		}
722 }
723 
/*
 * Set up the two linker-provided trampolines (after .text and .init.text)
 * to branch to ftrace_[regs_]caller and record them in ftrace_tramps[].
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	/*
	 * Stub template: load the kernel TOC from the paca into r12, add the
	 * offset to ftrace_[regs_]caller (filled in below), then branch
	 * there via CTR.
	 */
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
	unsigned long addr;
	long reladdr;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		addr = ppc_global_function_entry((void *)ftrace_regs_caller);
	else
		addr = ppc_global_function_entry((void *)ftrace_caller);

	reladdr = addr - kernel_toc_addr();

	/* The addis/addi pair can only reach +/- 2GB from the kernel TOC */
	if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
				(void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* Patch the high-adjusted and low 16-bit halves of the offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
760 #endif
761 
762 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
763 
764 extern void ftrace_graph_call(void);
765 extern void ftrace_graph_stub(void);
766 
767 static int ftrace_modify_ftrace_graph_caller(bool enable)
768 {
769 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
770 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
771 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
772 	ppc_inst_t old, new;
773 
774 	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
775 		return 0;
776 
777 	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
778 	new = ftrace_call_replace(ip, enable ? addr : stub, 0);
779 
780 	return ftrace_modify_code(ip, old, new);
781 }
782 
/* Route ftrace_graph_call to ftrace_graph_caller (graph tracing on). */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}
787 
/* Route ftrace_graph_call to ftrace_graph_stub (graph tracing off). */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}
792 
793 /*
794  * Hook the return address and push it in the stack of return addrs
795  * in current thread info. Return the address we want to divert to.
796  */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	/* Bail out when the graph tracer has shut itself down */
	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	/* Respect an explicit pause of graph tracing for this task */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* Guard against recursive entry from within the tracer itself */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* On success, divert the function's return to return_to_handler */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
822 
823 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * ftrace_ops entry point used with DYNAMIC_FTRACE_WITH_ARGS: redirect the
 * traced function's return (the saved LR in @fregs) to the graph handler.
 */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
829 #else
/* Entry point called from the asm mcount path; see __prepare_ftrace_return(). */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
835 #endif
836 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
837 
838 #ifdef CONFIG_PPC64_ELF_ABI_V1
/*
 * On ELFv1, kernel symbols carry a leading dot (".foo" is the text entry).
 * Strip it from @str when the pattern being matched does not start with
 * one, so "foo" can match the ".foo" symbol.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	bool skip_dot = str[0] == '.' && search[0] != '.';

	return skip_dot ? str + 1 : str;
}
846 #endif /* CONFIG_PPC64_ELF_ABI_V1 */
847