
TOMOYO Linux Cross Reference
Linux/arch/mips/kernel/ftrace.c


// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
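
/*
 * Note (added for clarity): these counts mirror the calling-site layouts
 * documented in the big comment above ftrace_make_nop() below. With
 * -mmcount-ra-address on 32-bit, the module mcount stub carries an extra
 * "move $12, ra_address" instruction, so "b 1f" must skip 5 instructions
 * instead of 4.
 */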

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /* op_code|addr : bits 31..26|25..0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr)  \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
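
/*
 * Worked example (added for illustration, target address is hypothetical):
 * INSN_JAL(0x80123458) encodes "jal 0x80123458" as
 *   0x0c000000 | ((0x80123458 >> 2) & 0x03ffffff) == 0x0c048d16.
 * A J-type jump keeps only 26 bits of word address, so the target must lie
 * in the same 256 MB segment as the call site; JUMP_RANGE_MASK is used
 * below to fold addresses into that range for uasm.
 */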

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* la v1, _mcount */
        v1 = 3;
        buf = (u32 *)&insn_la_mcount[0];
        UASM_i_LA(&buf, v1, MCOUNT_ADDR);

        /* jal (ftrace_caller + 8), jump over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;
        mm_segment_t old_fs;

        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}
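
/*
 * Note (added for clarity): only 4 bytes are written above, but 8 bytes are
 * flushed, matching the two-instruction helpers below; the flush range is
 * presumably kept identical so that both words of an mcount call sequence
 * are always coherent in the I-cache after any patch.
 */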

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
                                unsigned int new_code2)
{
        int faulted;
        mm_segment_t old_fs;

        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}
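
/*
 * Note (added for clarity): _2r is the "reverse order" twin of the helper
 * above. ftrace_make_nop() uses _2 (first word first, so the call site is
 * disabled before its second word changes), while ftrace_make_call() uses
 * _2r (second word first, so the first word only activates the call once
 * its companion instruction is already in place).
 */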
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
                                 unsigned int new_code2)
{
        int faulted;
        mm_segment_t old_fs;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        old_fs = get_fs();
        set_fs(get_ds());
        flush_icache_range(ip, ip + 8);
        set_fs(old_fs);

        return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount          --> nop
 *  sub sp, sp, 8       --> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *                                  1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *                                  1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
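
/*
 * Note (added for clarity): 0x10000000 is "beq zero, zero, off", i.e. an
 * unconditional branch; OR-ing in MCOUNT_OFFSET_INSNS sets the branch
 * offset (counted in instructions) so the patched "b 1f" skips the rest of
 * the module mcount stub, as sketched in the comment above.
 */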

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space, no long call is needed; otherwise a
         * long call is needed.
         */
        new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        /*
         * On 32 bit MIPS platforms, gcc adds a stack adjust
         * instruction in the delay slot after the branch to
         * mcount and expects mcount to restore the sp on return.
         * This is based on a legacy API and does nothing but
         * waste instructions so it's being removed at runtime.
         */
        return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
                                                INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}
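
/*
 * Note (added for clarity): ftrace_call is the patchable call site inside
 * the arch ftrace_caller trampoline (see arch/mips/kernel/mcount.S), so
 * this re-encodes that single "jal" to point at the new tracer function.
 */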

int __init ftrace_dyn_arch_init(void)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP    ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                        insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */
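
/*
 * Worked example (added for illustration): "sw ra, 28(sp)" encodes as
 * 0xafbf001c and "sd ra, 28(sp)" as 0xffbf001c; since the check below is
 * (code & S_RA_SP) == S_RA_SP, i.e. "all bits of 0xafbf0000 are set",
 * both forms match (the sd opcode 0x3f is a bit-superset of sw's 0x2b),
 * and (code & OFFSET_MASK) recovers the offset 28 (0x1c). S_R_SP likewise
 * matches a store of any of $16..$31 to the stack, which is how the scan
 * below detects "still inside the function prologue".
 */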

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * For a module, move the ip back past the instruction
         * "lui v1, hi_16bit_of_mcount" (offset is 24); for the kernel,
         * move back past the instruction "move at, ra" (offset is 16)
         */
        ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

        /*
         * Search the text backwards until finding a non-store instruction
         * or the "s{d,w} ra, offset(sp)" instruction
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where
                 * the ra is stored, then this is a leaf function and it
                 * does not store the ra on the stack
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move to the previous instruction (scanning backwards) */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}

#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
        int faulted, insns;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address where the return address
         * of the caller of _mcount is saved.
         *
         * If gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's stack space and
         * hijack it directly; a non-leaf function saves the return address
         * in its own stack space, so we cannot hijack it directly but must
         * find the real stack address, which ftrace_get_parent_ra_addr()
         * does.
         *
         * If gcc >= 4.5, with the new -mmcount-ra-address option, the
         * location of the return address of a non-leaf function is saved
         * to $12 for us, and for a leaf function only a zero is put into
         * $12. We do this in ftrace_graph_caller of mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If getting the stack address of the non-leaf function's ra
         * fails, stop the function graph tracer and return
         */
        if (parent_ra_addr == NULL)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
                                     NULL) == -EBUSY) {
                *parent_ra_addr = old_parent_ra;
                return;
        }

        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which will be used to filter the function
         * entries configured through the tracing/set_graph_function interface.
         */

        insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent_ra_addr = old_parent_ra;
        }
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */
397 #endif  /* CONFIG_FUNCTION_GRAPH_TRACER */
398 
399 #ifdef CONFIG_FTRACE_SYSCALLS
400 
401 #ifdef CONFIG_32BIT
402 unsigned long __init arch_syscall_addr(int nr)
403 {
404         return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
405 }
406 #endif
407 
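/*
 * Note (added for clarity): a 64-bit MIPS kernel can expose up to three
 * syscall ABIs, each numbered from its own base (__NR_64_Linux,
 * __NR_N32_Linux, __NR_O32_Linux), so the lookup below selects the table
 * whose number range contains nr and falls back to sys_ni_syscall.
 */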
#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
        if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
                return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
        if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
                return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
        if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
                return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

        return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */

