TOMOYO Linux Cross Reference
Linux/arch/x86/mm/kmmio.c

// SPDX-License-Identifier: GPL-2.0
/* Support for MMIO probes.
 * Benefits in many ways from the kprobes code.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long addr; /* the requested address */
        pteval_t old_presence; /* page presence prior to arming */
        bool armed;

        /*
         * Number of times this page has been registered as a part
         * of a probe. If zero, page is disarmed and this may be freed.
         * Used only by writers (RCU) and post_kmmio_handler().
         * Protected by kmmio_lock, when linked into kmmio_page_table.
         */
        int count;

        bool scheduled_for_release;
};

struct kmmio_delayed_release {
        struct rcu_head rcu;
        struct kmmio_fault_page *release_list;
};

struct kmmio_context {
        struct kmmio_fault_page *fpage;
        struct kmmio_probe *probe;
        unsigned long saved_flags;
        unsigned long addr;
        int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long addr)
{
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);

        return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
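
/*
 * Illustrative sketch (hypothetical helper, not in the original kmmio.c):
 * for a 4 KiB mapping, page_level_mask(PG_LEVEL_4K) is simply PAGE_MASK,
 * so the bucket a fault page lands in is determined by the page frame of
 * the address, exactly as kmmio_page_list() computes it above.
 */
static inline struct list_head *example_bucket_4k(unsigned long addr)
{
        return &kmmio_page_table[hash_long(addr & PAGE_MASK,
                                           KMMIO_PAGE_HASH_BITS)];
}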

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem: given a query point, find
 * all intervals that contain it. The plain RCU list walk below could use
 * the existing prio tree code, or possibly better structures:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple), or
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio probe at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
        struct kmmio_probe *p;
        list_for_each_entry_rcu(p, &kmmio_probes, list) {
                if (addr >= p->addr && addr < (p->addr + p->len))
                        return p;
        }
        return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
        struct list_head *head;
        struct kmmio_fault_page *f;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);
        head = kmmio_page_list(addr);
        list_for_each_entry_rcu(f, head, list) {
                if (f->addr == addr)
                        return f;
        }
        return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
        pmd_t new_pmd;
        pmdval_t v = pmd_val(*pmd);
        if (clear) {
                *old = v;
                new_pmd = pmd_mknotpresent(*pmd);
        } else {
                /* Presume this has been called with clear==true previously */
                new_pmd = __pmd(*old);
        }
        set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
        pteval_t v = pte_val(*pte);
        if (clear) {
                *old = v;
                /* Nothing should care about address */
                pte_clear(&init_mm, 0, pte);
        } else {
                /* Presume this has been called with clear==true previously */
                set_pte_atomic(pte, __pte(*old));
        }
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
        unsigned int level;
        pte_t *pte = lookup_address(f->addr, &level);

        if (!pte) {
                pr_err("no pte for addr 0x%08lx\n", f->addr);
                return -1;
        }

        switch (level) {
        case PG_LEVEL_2M:
                clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
                break;
        case PG_LEVEL_4K:
                clear_pte_presence(pte, clear, &f->old_presence);
                break;
        default:
                pr_err("unexpected page level 0x%x.\n", level);
                return -1;
        }

        __flush_tlb_one_kernel(f->addr);
        return 0;
}
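
/*
 * Illustrative sketch (hypothetical helper, not in the original kmmio.c):
 * querying whether the mapping backing an address is currently present,
 * mirroring the page-level dispatch in clear_page_presence() above.
 */
static inline bool example_mapping_present(unsigned long addr)
{
        unsigned int level;
        pte_t *pte = lookup_address(addr, &level);

        if (!pte)
                return false;
        if (level == PG_LEVEL_2M)
                return pmd_present(*(pmd_t *)pte);
        return pte_present(*pte);
}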

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret;
        WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
        if (f->armed) {
                pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
                           f->addr, f->count, !!f->old_presence);
        }
        ret = clear_page_presence(f, true);
        WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
                  f->addr);
        f->armed = true;
        return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret = clear_page_presence(f, false);
        WARN_ONCE(ret < 0,
                        KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
        f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing within
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry, as the page fault is raised through
 * an interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
        struct kmmio_context *ctx;
        struct kmmio_fault_page *faultpage;
        int ret = 0; /* default to fault not handled */
        unsigned long page_base = addr;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);
        if (!pte)
                return -EINVAL;
        page_base &= page_level_mask(l);

        /*
         * Preemption is now disabled to prevent process switch during
         * single stepping. We can only handle one active kmmio trace
         * per cpu, so ensure that we finish it before something else
         * gets to run. We also hold the RCU read lock over single
         * stepping to avoid looking up the probe and kmmio_fault_page
         * again.
         */
        preempt_disable();
        rcu_read_lock();

        faultpage = get_kmmio_fault_page(page_base);
        if (!faultpage) {
                /*
                 * Either this page fault is not caused by kmmio, or
                 * another CPU just pulled the kmmio probe from under
                 * our feet. The latter case should not be possible.
                 */
                goto no_kmmio;
        }

        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
                if (page_base == ctx->addr) {
                        /*
                         * A second fault on the same page means some other
                         * condition needs handling by do_page_fault(), the
                         * page really not being present is the most common.
                         */
                        pr_debug("secondary hit for 0x%08lx CPU %d.\n",
                                 addr, smp_processor_id());

                        if (!faultpage->old_presence)
                                pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
                                        addr, smp_processor_id());
                } else {
                        /*
                         * Prevent overwriting already in-flight context.
                         * This should not happen, let's hope disarming at
                         * least prevents a panic.
                         */
                        pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
                                 smp_processor_id(), addr);
                        pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
        }
        ctx->active++;

        ctx->fpage = faultpage;
        ctx->probe = get_kmmio_probe(page_base);
        ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        ctx->addr = page_base;

        if (ctx->probe && ctx->probe->pre_handler)
                ctx->probe->pre_handler(ctx->probe, regs, addr);

        /*
         * Enable single-stepping and disable interrupts for the faulting
         * context. Local interrupts must not get enabled during stepping.
         */
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;

        /* Now we set the present bit in the PTE and single step. */
        disarm_kmmio_fault_page(ctx->fpage);

        /*
         * If another CPU accesses the same page while we are stepping,
         * the access will not be caught. It will simply succeed and the
         * only downside is we lose the event. If this becomes a problem,
         * the user should drop to a single CPU before tracing.
         */

        put_cpu_var(kmmio_ctx);
        return 1; /* fault handled */

no_kmmio_ctx:
        put_cpu_var(kmmio_ctx);
no_kmmio:
        rcu_read_unlock();
        preempt_enable_no_resched();
        return ret;
}
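
/*
 * Illustrative sketch (hypothetical handlers, not in the original
 * kmmio.c): the pre_handler is invoked from kmmio_handler() while the
 * page is still armed (not present) and with interrupts disabled; the
 * post_handler is invoked from post_kmmio_handler() after the single
 * step, just before the page is re-armed. Neither may sleep or take
 * sleeping locks.
 */
static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
                        unsigned long addr)
{
        /* e.g. record the faulting address and instruction pointer */
        pr_debug("pre: access at 0x%08lx, ip 0x%08lx\n", addr, regs->ip);
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
                         struct pt_regs *regs)
{
        /* e.g. inspect the dr6 condition after the stepped instruction */
        pr_debug("post: dr6 condition 0x%08lx\n", condition);
}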

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
        int ret = 0;
        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

        if (!ctx->active) {
                /*
                 * Debug traps without an active context are due to either
                 * something external causing them (e.g. using a debugger
                 * while mmio tracing is enabled), or erroneous behaviour.
                 */
                pr_warning("unexpected debug trap on CPU %d.\n",
                           smp_processor_id());
                goto out;
        }

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        /* Prevent racing against release_kmmio_fault_page(). */
        spin_lock(&kmmio_lock);
        if (ctx->fpage->count)
                arm_kmmio_fault_page(ctx->fpage);
        spin_unlock(&kmmio_lock);

        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;

        /* These were acquired in kmmio_handler(). */
        ctx->active--;
        BUG_ON(ctx->active);
        rcu_read_unlock();
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, flags
         * will have TF set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (!(regs->flags & X86_EFLAGS_TF))
                ret = 1;
out:
        put_cpu_var(kmmio_ctx);
        return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (f) {
                if (!f->count)
                        arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }

        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;

        f->count = 1;
        f->addr = addr;

        if (arm_kmmio_fault_page(f)) {
                kfree(f);
                return -1;
        }

        list_add_rcu(&f->list, kmmio_page_list(f->addr));

        return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
                                struct kmmio_fault_page **release_list)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (!f)
                return;

        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
                disarm_kmmio_fault_page(f);
                if (!f->scheduled_for_release) {
                        f->release_next = *release_list;
                        *release_list = f;
                        f->scheduled_for_release = true;
                }
        }
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
        unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        unsigned int l;
        pte_t *pte;

        spin_lock_irqsave(&kmmio_lock, flags);
        if (get_kmmio_probe(addr)) {
                ret = -EEXIST;
                goto out;
        }

        pte = lookup_address(addr, &l);
        if (!pte) {
                ret = -EINVAL;
                goto out;
        }

        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
                if (add_kmmio_fault_page(addr + size))
                        pr_err("Unable to set page fault.\n");
                size += page_level_size(l);
        }
out:
        spin_unlock_irqrestore(&kmmio_lock, flags);
        /*
         * XXX: What should I do here?
         * Here was a call to global_flush_tlb(), but it does not exist
         * anymore. It seems it's not needed after all.
         */
        return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
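
/*
 * Illustrative sketch (hypothetical, not in the original kmmio.c):
 * registering a probe over an ioremapped region, in the style of
 * mmiotrace, using the example_pre()/example_post() handlers above.
 */
static struct kmmio_probe example_probe;

static int example_register(void __iomem *io, unsigned long len)
{
        example_probe.addr = (unsigned long)io;
        example_probe.len = len;
        example_probe.pre_handler = example_pre;
        example_probe.post_handler = example_post;
        return register_kmmio_probe(&example_probe); /* 0 or -errno */
}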

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr = container_of(
                                                head,
                                                struct kmmio_delayed_release,
                                                rcu);
        struct kmmio_fault_page *f = dr->release_list;
        while (f) {
                struct kmmio_fault_page *next = f->release_next;
                BUG_ON(f->count);
                kfree(f);
                f = next;
        }
        kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr =
                container_of(head, struct kmmio_delayed_release, rcu);
        struct kmmio_fault_page *f = dr->release_list;
        struct kmmio_fault_page **prevp = &dr->release_list;
        unsigned long flags;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (f) {
                if (!f->count) {
                        list_del_rcu(&f->list);
                        prevp = &f->release_next;
                } else {
                        *prevp = f->release_next;
                        f->release_next = NULL;
                        f->scheduled_for_release = false;
                }
                f = *prevp;
        }
        spin_unlock_irqrestore(&kmmio_lock, flags);

        /* This is the real RCU destroy call. */
        call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after an RCU grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        unsigned long size = 0;
        unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;
        unsigned int l;
        pte_t *pte;

        pte = lookup_address(addr, &l);
        if (!pte)
                return;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
                release_kmmio_fault_page(addr + size, &release_list);
                size += page_level_size(l);
        }
        list_del_rcu(&p->list);
        kmmio_count--;
        spin_unlock_irqrestore(&kmmio_lock, flags);

        if (!release_list)
                return;

        drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
        if (!drelease) {
                pr_crit("leaking kmmio_fault_page objects.\n");
                return;
        }
        drelease->release_list = release_list;

        /*
         * This is not really RCU here. We have just disarmed a set of
         * pages so that they cannot trigger page faults anymore. However,
         * we cannot remove the pages from kmmio_page_table,
         * because a probe hit might be in flight on another CPU. The
         * pages are collected into a list, and they will be removed from
         * kmmio_page_table when it is certain that no probe hit related to
         * these pages can be in flight. An RCU grace period sounds like a
         * good choice.
         *
         * If we removed the pages too early, the kmmio page fault handler
         * might not find the respective kmmio_fault_page and determine it's
         * not a kmmio fault, when it actually is. This would lead to madness.
         */
        call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
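
/*
 * Illustrative sketch (hypothetical, not in the original kmmio.c):
 * tearing down the probe registered above. As the comment on
 * unregister_kmmio_probe() requires, wait for an RCU grace period
 * before reusing or freeing the struct kmmio_probe.
 */
static void example_unregister(void)
{
        unregister_kmmio_probe(&example_probe);
        synchronize_rcu(); /* no pre/post handler can still run on it */
        /* example_probe may now be reused or freed */
}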

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
        struct die_args *arg = args;
        unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

        if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
                if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
                        /*
                         * Reset the BS bit in dr6 (pointed to by arg->err)
                         * to denote completion of processing
                         */
                        *dr6_p &= ~DR_STEP;
                        return NOTIFY_STOP;
                }

        return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
        .notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
        int i;

        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
                INIT_LIST_HEAD(&kmmio_page_table[i]);

        return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
        int i;

        unregister_die_notifier(&nb_die);
        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
                WARN_ONCE(!list_empty(&kmmio_page_table[i]),
                        KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
        }
}
