
TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/kprobes.c


/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes contributions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Nov     Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *              for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

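/*
 * Check whether addr is the address of the kprobe currently being
 * handled on this CPU.
 */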
int is_current_kprobe_addr(unsigned long addr)
{
        struct kprobe *p = kprobe_running();
        return (p && (unsigned long)p->addr == addr) ? 1 : 0;
}

bool arch_within_kprobe_blacklist(unsigned long addr)
{
        return  (addr >= (unsigned long)__kprobes_text_start &&
                 addr < (unsigned long)__kprobes_text_end) ||
                (addr >= (unsigned long)_stext &&
                 addr < (unsigned long)__head_end);
}

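/*
 * Resolve a symbol name (optionally <module:symbol>) to the address a
 * kprobe should actually be placed at, taking the function entry
 * conventions of the ELF ABI in use into account.
 */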
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
        kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
        /* PPC64 ABIv2 needs local entry point */
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
        if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
                unsigned long faddr;
                /*
                 * Per livepatch.h, ftrace location is always within the first
                 * 16 bytes of a function on powerpc with -mprofile-kernel.
                 */
                faddr = ftrace_location_range((unsigned long)addr,
                                              (unsigned long)addr + 16);
                if (faddr)
                        addr = (kprobe_opcode_t *)faddr;
                else
#endif
                        addr = (kprobe_opcode_t *)ppc_function_entry(addr);
        }
#elif defined(PPC64_ELF_ABI_v1)
        /*
         * 64bit powerpc ABIv1 uses function descriptors:
         * - Check for the dot variant of the symbol first.
         * - If that fails, try looking up the symbol provided.
         *
         * This ensures we always get to the actual symbol and not
         * the descriptor.
         *
         * Also handle <module:symbol> format.
         */
        char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
        const char *modsym;
        bool dot_appended = false;
        if ((modsym = strchr(name, ':')) != NULL) {
                modsym++;
                if (*modsym != '\0' && *modsym != '.') {
                        /* Convert to <module:.symbol> */
                        strncpy(dot_name, name, modsym - name);
                        dot_name[modsym - name] = '.';
                        dot_name[modsym - name + 1] = '\0';
                        strncat(dot_name, modsym,
                                sizeof(dot_name) - (modsym - name) - 2);
                        dot_appended = true;
                } else {
                        dot_name[0] = '\0';
                        strncat(dot_name, name, sizeof(dot_name) - 1);
                }
        } else if (name[0] != '.') {
                dot_name[0] = '.';
                dot_name[1] = '\0';
                strncat(dot_name, name, KSYM_NAME_LEN - 2);
                dot_appended = true;
        } else {
                dot_name[0] = '\0';
                strncat(dot_name, name, KSYM_NAME_LEN - 1);
        }
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
        if (!addr && dot_appended) {
                /* Let's try the original non-dot symbol lookup */
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
        }
#else
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

        return addr;
}

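/*
 * Validate the probe address and copy the probed instruction into a
 * dedicated instruction slot so it can be single-stepped out of line.
 */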
int arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;
        kprobe_opcode_t insn = *p->addr;

        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
        }

        /* insn must be on a special executable page on ppc64.  This is
         * not explicitly required on ppc32 (right now), but it doesn't hurt */
        if (!ret) {
                p->ainsn.insn = get_insn_slot();
                if (!p->ainsn.insn)
                        ret = -ENOMEM;
        }

        if (!ret) {
                memcpy(p->ainsn.insn, p->addr,
                                MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
                p->opcode = *p->addr;
                flush_icache_range((unsigned long)p->ainsn.insn,
                        (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
        }

        p->ainsn.boostable = 0;
        return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
        patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        patch_instruction(p->addr, p->opcode);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        enable_single_step(regs);

        /*
         * On powerpc we should single step on the original
         * instruction even if the probed insn is a trap
         * variant, as values in regs could play a part in
         * whether the trap is taken or not.
         */
        regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_msr = regs->msr;
}

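/*
 * On ELF ABIv2, treat a probe within the first 8 bytes of a function
 * as being on function entry (the local entry point sits at most two
 * instructions past the global entry point), or within the first 16
 * bytes when KPROBES_ON_FTRACE is enabled (see the livepatch.h note
 * above). On other ABIs only offset 0 counts as function entry.
 */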
bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
        return offset <= 16;
#else
        return offset <= 8;
#endif
#else
        return !offset;
#endif
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->link;

        /* Replace the return addr with trampoline addr */
        regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

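/*
 * Try to emulate the probed instruction in the kernel instead of
 * single-stepping it. Returns > 0 if the instruction was emulated
 * (regs->nip already updated), 0 if it must be single-stepped in
 * hardware, and < 0 if it is something we should never have probed.
 */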
int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
        int ret;
        unsigned int insn = *p->ainsn.insn;

        /* regs->nip is also adjusted if emulate_step returns 1 */
        ret = emulate_step(regs, insn);
        if (ret > 0) {
                /*
                 * Once this instruction has been boosted
                 * successfully, set the boostable flag
                 */
                if (unlikely(p->ainsn.boostable == 0))
                        p->ainsn.boostable = 1;
        } else if (ret < 0) {
                /*
                 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
                 * So, we should never get here... but, it's still
                 * good to catch them, just in case...
                 */
                printk("Can't step on instruction %x\n", insn);
                BUG();
        } else if (ret == 0)
                /* This instruction can't be boosted */
                p->ainsn.boostable = -1;

        return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

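/*
 * Entry point from the breakpoint trap handler. Finds the kprobe
 * registered at regs->nip, runs its pre_handler and arranges to
 * emulate or single-step the probed instruction, taking care of
 * reentrant hits from within another kprobe's handlers. Returns 1 if
 * the trap was consumed by kprobes, 0 to let the kernel handle it.
 */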
int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *)regs->nip;
        struct kprobe_ctlblk *kcb;

        if (user_mode(regs))
                return 0;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        kprobe_opcode_t insn = *p->ainsn.insn;
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                        is_trap(insn)) {
                                /* Turn off 'trace' bits */
                                regs->msr &= ~MSR_SINGLESTEP;
                                regs->msr |= kcb->kprobe_saved_msr;
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * Here we save the original kprobe variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        if (p->ainsn.boostable >= 0) {
                                ret = try_to_emulate(p, regs);

                                if (ret > 0) {
                                        restore_previous_kprobe(kcb);
                                        preempt_enable_no_resched();
                                        return 1;
                                }
                        }
                        prepare_singlestep(p, regs);
                        return 1;
                } else {
                        if (*addr != BREAKPOINT_INSTRUCTION) {
                                /* If trap variant, then it doesn't belong to us */
                                kprobe_opcode_t cur_insn = *addr;
                                if (is_trap(cur_insn))
                                        goto no_kprobe;
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit it; no further
                                 * handling of this interrupt is appropriate
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __this_cpu_read(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                if (!skip_singlestep(p, regs, kcb))
                                        goto ss_probe;
                                ret = 1;
                        }
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * PowerPC has multiple variants of the "trap"
                         * instruction. If the current instruction is a
                         * trap variant, it could belong to someone else
                         */
                        kprobe_opcode_t cur_insn = *addr;
                        if (is_trap(cur_insn))
                                goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

ss_probe:
        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);

                if (ret > 0) {
                        if (p->post_handler)
                                p->post_handler(p, regs, 0);

                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        reset_current_kprobe();
                        preempt_enable_no_resched();
                        return 1;
                }
        }
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *              causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
        ".type kretprobe_trampoline, @function\n"
        "kretprobe_trampoline:\n"
        "nop\n"
        "blr\n"
        ".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->nip = orig_ret_address;
        /*
         * Make LR point to the orig_ret_address.
         * When the 'nop' inside the kretprobe_trampoline
         * is optimized, we can do a 'blr' after executing the
         * detour buffer code.
         */
        regs->link = orig_ret_address;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur || user_mode(regs))
                return 0;

        /* make sure we got here for an instruction we have a kprobe on */
        if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        /* Adjust nip to after the single-stepped instruction */
        regs->nip = (unsigned long)cur->addr + 4;
        regs->msr |= kcb->kprobe_saved_msr;

        /* Restore the original saved kprobe variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, msr
         * will have DE/SE set, in which case continue the remaining
         * processing of do_debug, as if this were not a probe hit.
         */
        if (regs->msr & MSR_SINGLESTEP)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

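/*
 * Called when a fault occurs while single-stepping the out-of-line
 * instruction or while a kprobe handler is running; recovers kprobe
 * state so the fault can then be handled normally.
 */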
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and point nip back to the probe address,
                 * then allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->nip = (unsigned long)cur->addr;
                regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
                regs->msr |= kcb->kprobe_saved_msr;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts could also be used to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tried to access user space via
                 * copy_from_user(), get_user(), etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if ((entry = search_exception_tables(regs->nip)) != NULL) {
                        regs->nip = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it.
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
        if (!kernel_text_address((unsigned long)entry))
                return ppc_global_function_entry(entry);
        else
#endif
                return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* setup return addr to the jprobe handler routine */
        regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
        regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

        /*
         * jprobes use jprobe_return() which skips the normal return
         * path of the function, and this messes up the accounting of the
         * function graph tracer.
         *
         * Pause function graph tracing while performing the jprobe function.
         */
        pause_graph_tracing();

        return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void __used jprobe_return(void)
{
        asm volatile("trap" ::: "memory");
}
NOKPROBE_SYMBOL(jprobe_return);

static void __used jprobe_return_end(void)
{
}
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        /*
         * FIXME - we should ideally be validating that we got here
         * because of the "trap" in jprobe_return() above, before
         * restoring the saved regs...
         */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
        /* It's OK to start function graph tracing again */
        unpause_graph_tracing();
        preempt_enable_no_resched();
        return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);

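For reference, the sketch below (not part of the file above) shows how a module would exercise these arch hooks through the generic kprobes API from <linux/kprobes.h>. The handler names and the probed symbol "_do_fork" are illustrative assumptions for kernels of this vintage; register_kprobe()/unregister_kprobe() and the handler signatures are the standard interface that kprobe_handler() and kprobe_post_handler() above end up invoking.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative pre-handler: runs from kprobe_handler() before the
 * probed instruction is emulated or single-stepped. On powerpc,
 * regs->nip is the probed address at this point. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("pre_handler: hit at %pS, nip=0x%lx\n", p->addr, regs->nip);
        return 0;       /* 0: let kprobes emulate/single-step the insn */
}

/* Illustrative post-handler: runs from kprobe_post_handler() once the
 * out-of-line single-step (or in-kernel emulation) completes. */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
                         unsigned long flags)
{
        pr_info("post_handler: done, msr=0x%lx\n", regs->msr);
}

static struct kprobe kp = {
        /* Assumed symbol; any kallsyms-visible, non-blacklisted function
         * works. kprobe_lookup_name() above resolves the name to the
         * right entry point for the ELF ABI in use. */
        .symbol_name    = "_do_fork",
        .pre_handler    = handler_pre,
        .post_handler   = handler_post,
};

static int __init kprobe_sample_init(void)
{
        int ret = register_kprobe(&kp);

        if (ret < 0) {
                pr_err("register_kprobe failed: %d\n", ret);
                return ret;
        }
        pr_info("kprobe planted at %p\n", kp.addr);
        return 0;
}

static void __exit kprobe_sample_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kprobe_sample_init);
module_exit(kprobe_sample_exit);
MODULE_LICENSE("GPL");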
