
TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/alternative.c


#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)                                           \
do {                                                                    \
        if (debug_alternative)                                          \
                printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);   \
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)                              \
do {                                                                    \
        if (unlikely(debug_alternative)) {                              \
                int j;                                                  \
                                                                        \
                if (!(len))                                             \
                        break;                                          \
                                                                        \
                printk(KERN_DEBUG fmt, ##args);                         \
                for (j = 0; j < (len) - 1; j++)                         \
                        printk(KERN_CONT "%02hhx ", buf[j]);            \
                printk(KERN_CONT "%02hhx\n", buf[j]);                   \
        }                                                               \
} while (0)
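
/*
 * Illustration of the above (address and bytes hypothetical): when booted
 * with "debug-alternative",
 *
 *      DUMP_BYTES(buf, 4, "%px: patched: ", buf);
 *
 * emits one continued debug line such as:
 *
 *      ffffffff81000000: patched: 0f 1f 44 00
 */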

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
        GENERIC_NOP1,
        GENERIC_NOP2,
        GENERIC_NOP3,
        GENERIC_NOP4,
        GENERIC_NOP5,
        GENERIC_NOP6,
        GENERIC_NOP7,
        GENERIC_NOP8,
        GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
        K8_NOP1,
        K8_NOP2,
        K8_NOP3,
        K8_NOP4,
        K8_NOP5,
        K8_NOP6,
        K8_NOP7,
        K8_NOP8,
        K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
        K7_NOP1,
        K7_NOP2,
        K7_NOP3,
        K7_NOP4,
        K7_NOP5,
        K7_NOP6,
        K7_NOP7,
        K7_NOP8,
        K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
        P6_NOP1,
        P6_NOP2,
        P6_NOP3,
        P6_NOP4,
        P6_NOP5,
        P6_NOP6,
        P6_NOP7,
        P6_NOP8,
        P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
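
/*
 * Lookup illustration: thanks to the cumulative offsets above, for
 * 1 <= n <= ASM_NOP_MAX, ideal_nops[n] points directly at an n-byte NOP,
 * and the final slot (ASM_NOP_MAX + 1) holds the 5-byte atomic NOP. For
 * example, on a P6-class CPU ideal_nops[5] is P6_NOP5, which lives at
 * p6nops + 1 + 2 + 3 + 4.
 */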

void __init arch_init_ideal_nops(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * Due to a decoder implementation quirk, some
                 * specific Intel CPUs actually perform better with
                 * the "k8_nops" than with the SDM-recommended NOPs.
                 */
                if (boot_cpu_data.x86 == 6 &&
                    boot_cpu_data.x86_model >= 0x0f &&
                    boot_cpu_data.x86_model != 0x1c &&
                    boot_cpu_data.x86_model != 0x26 &&
                    boot_cpu_data.x86_model != 0x27 &&
                    boot_cpu_data.x86_model < 0x30) {
                        ideal_nops = k8_nops;
                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
                        ideal_nops = p6_nops;
                } else {
#ifdef CONFIG_X86_64
                        ideal_nops = k8_nops;
#else
                        ideal_nops = intel_nops;
#endif
                }
                break;

        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 > 0xf) {
                        ideal_nops = p6_nops;
                        return;
                }

                /* fall through */

        default:
#ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
#else
                if (boot_cpu_has(X86_FEATURE_K8))
                        ideal_nops = k8_nops;
                else if (boot_cpu_has(X86_FEATURE_K7))
                        ideal_nops = k7_nops;
                else
                        ideal_nops = intel_nops;
#endif
        }
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, ideal_nops[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
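
/*
 * A minimal sketch (not part of the upstream file, assuming ASM_NOP_MAX == 8
 * as on x86) of how add_nops() splits a pad: the fill is greedy, longest
 * ideal NOP first.
 */
static inline void add_nops_example(void)
{
        u8 pad[13];

        /* pad = { 8-byte NOP, 5-byte NOP }, not thirteen 1-byte NOPs */
        add_nops(pad, sizeof(pad));
}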

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1- or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
        return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
        u8 *next_rip, *tgt_rip;
        s32 n_dspl, o_dspl;
        int repl_len;

        if (a->replacementlen != 5)
                return;

        o_dspl = *(s32 *)(insnbuf + 1);

        /* next_rip of the replacement JMP */
        next_rip = repl_insn + a->replacementlen;
        /* target rip of the replacement JMP */
        tgt_rip  = next_rip + o_dspl;
        n_dspl = tgt_rip - orig_insn;

        DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

        if (tgt_rip - orig_insn >= 0) {
                if (n_dspl - 2 <= 127)
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        /* negative offset */
        } else {
                if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
                        goto two_byte_jmp;
                else
                        goto five_byte_jmp;
        }

two_byte_jmp:
        n_dspl -= 2;

        insnbuf[0] = 0xeb;
        insnbuf[1] = (s8)n_dspl;
        add_nops(insnbuf + 2, 3);

        repl_len = 2;
        goto done;

five_byte_jmp:
        n_dspl -= 5;

        insnbuf[0] = 0xe9;
        *(s32 *)&insnbuf[1] = n_dspl;

        repl_len = 5;

done:

        DPRINTK("final displ: 0x%08x, JMP 0x%lx",
                n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
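
/*
 * Worked example (addresses hypothetical): say the original site is at
 * 0x1000 and insnbuf holds "e9 3b 10 00 00", copied from a replacement JMP
 * at 0x2000. Then next_rip = 0x2005, tgt_rip = 0x2005 + 0x103b = 0x3040,
 * and n_dspl = 0x3040 - 0x1000 = 0x2040: too big for an s8, so the JMP
 * stays five bytes, "e9 3b 20 00 00" (0x2040 - 5 = 0x203b). Had tgt_rip
 * been 0x1040 instead, n_dspl - 2 = 0x3e would fit in an s8 and the buffer
 * would become "eb 3e" plus a 3-byte NOP pad.
 */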

/*
 * "noinline" to cause control flow change and thus invalidate I$ and
 * cause refetch after modification.
 */
static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
{
        unsigned long flags;
        int i;

        for (i = 0; i < a->padlen; i++) {
                if (instr[i] != 0x90)
                        return;
        }

        local_irq_save(flags);
        add_nops(instr + (a->instrlen - a->padlen), a->padlen);
        local_irq_restore(flags);

        DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
                   instr, a->instrlen - a->padlen, a->padlen);
}

/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause a control flow change and thus make the insn
 * cache refetch the changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
                                                  struct alt_instr *end)
{
        struct alt_instr *a;
        u8 *instr, *replacement;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("alt table %px, -> %px", start, end);
        /*
         * The scan order should be from start to end. A later scanned
         * alternative code can overwrite previously scanned alternative code.
         * Some kernel functions (e.g. memcpy, memset, etc) use this order to
         * patch code.
         *
         * So be careful if you want to change the scan order to any other
         * order.
         */
        for (a = start; a < end; a++) {
                int insnbuf_sz = 0;

                instr = (u8 *)&a->instr_offset + a->instr_offset;
                replacement = (u8 *)&a->repl_offset + a->repl_offset;
                BUG_ON(a->instrlen > sizeof(insnbuf));
                BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
                if (!boot_cpu_has(a->cpuid)) {
                        if (a->padlen > 1)
                                optimize_nops(a, instr);

                        continue;
                }

                DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
                        a->cpuid >> 5,
                        a->cpuid & 0x1f,
                        instr, a->instrlen,
                        replacement, a->replacementlen, a->padlen);

                DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
                DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

                memcpy(insnbuf, replacement, a->replacementlen);
                insnbuf_sz = a->replacementlen;

                /*
                 * 0xe8 is a relative jump; fix the offset.
                 *
                 * Instruction length is checked before the opcode to avoid
                 * accessing uninitialized bytes for zero-length replacements.
                 */
                if (a->replacementlen == 5 && *insnbuf == 0xe8) {
                        *(s32 *)(insnbuf + 1) += replacement - instr;
                        DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
                                *(s32 *)(insnbuf + 1),
                                (unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
                }

                if (a->replacementlen && is_jmp(replacement[0]))
                        recompute_jump(a, instr, replacement, insnbuf);

                if (a->instrlen > a->replacementlen) {
                        add_nops(insnbuf + a->replacementlen,
                                 a->instrlen - a->replacementlen);
                        insnbuf_sz += a->instrlen - a->replacementlen;
                }
                DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

                text_poke_early(instr, insnbuf, insnbuf_sz);
        }
}
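
/*
 * A sketch of the producer side (illustrative only; see the macros in
 * <asm/alternative.h>): the alternative() macro family emits the original
 * instruction into .text plus a struct alt_instr entry that the loop above
 * consumes. The feature bit below is just an example choice.
 */
static inline void apply_alternatives_example(void)
{
        /* Runs "pause" instead of a 2-byte NOP on CPUs that have SSE2. */
        alternative(ASM_NOP2, "pause", X86_FEATURE_XMM2);
}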

#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
                                  u8 *text, u8 *text_end)
{
        const s32 *poff;

        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                if (*ptr == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
                                    u8 *text, u8 *text_end)
{
        const s32 *poff;

        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                if (*ptr == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
}
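
/*
 * Byte-level illustration: LOCK (0xf0) and the DS override (0x3e) are both
 * one-byte prefixes, so the two routines above can swap them in place
 * without changing the instruction length. For a hypothetical site:
 *
 *      f0 ff 00        lock incl (%rax)        SMP: real locked RMW
 *      3e ff 00        ds incl (%rax)          UP: DS override, a no-op here
 */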

struct smp_alt_module {
        /* the module owning these lock prefixes (NULL for the core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        const s32       *locks;
        const s32       *locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;    /* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text,  void *text_end)
{
        struct smp_alt_module *smp;

        mutex_lock(&text_mutex);
        if (!uniproc_patched)
                goto unlock;

        if (num_possible_cpus() == 1)
                /* Don't bother remembering, we'll never have to undo it. */
                goto smp_unlock;

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (!smp)
                /* we'll run the (safe but slow) SMP code then ... */
                goto unlock;

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("locks %p -> %p, text %p -> %p, name %s",
                smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
        alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
        mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        mutex_lock(&text_mutex);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                kfree(item);
                break;
        }
        mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
        struct smp_alt_module *mod;

        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);

        mutex_lock(&text_mutex);

        if (uniproc_patched) {
                pr_info("switching to SMP code\n");
                BUG_ON(num_online_cpus() != 1);
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
                uniproc_patched = false;
        }
        mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        const s32 *poff;
        u8 *text_start = start;
        u8 *text_end = end;

        lockdep_assert_held(&text_mutex);

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (poff = mod->locks; poff < mod->locks_end; poff++) {
                        const u8 *ptr = (const u8 *)poff + *poff;

                        if (text_start <= ptr && text_end > ptr)
                                return 1;
                }
        }

        return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
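
/*
 * Example of the flow above (sizes hypothetical): for a 7-byte paravirt
 * site, if pv_init_ops.patch() rewrites it with a 2-byte native sequence it
 * returns used == 2, and the remaining 5 bytes are filled with ideal NOPs
 * by add_nops() before text_poke_early() writes all 7 bytes back.
 */
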
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif  /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
        /*
         * The patching is not fully atomic, so try to avoid local
         * interruptions that might execute the code that is about to be
         * patched. Other CPUs are not running.
         */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
        /* Patch to UP if other cpus are not imminent. */
        if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
                uniproc_patched = true;
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);
        }

        if (!uniproc_patched || num_possible_cpus() == 1)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);
#endif

        apply_paravirt(__parainstructions, __parainstructions_end);

        restart_nmi();
        alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                       size_t len)
{
        unsigned long flags;

        local_irq_save(flags);
        memcpy(addr, opcode, len);
        local_irq_restore(flags);
        /*
         * Could also do a CLFLUSH here to speed up CPU recovery; but
         * that causes hangs on some VIA CPUs.
         */
        return addr;
}
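
/*
 * Usage sketch (site address hypothetical): early boot code can overwrite
 * an instruction directly, e.g. replacing a 5-byte site with the P6 5-byte
 * NOP:
 *
 *      static const u8 nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
 *      text_poke_early(site, nop5, sizeof(nop5));
 */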

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * That means the write must be small enough to be performed atomically and
 * the address must be aligned in a way that permits an atomic write. It also
 * makes sure we fit on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /*
         * Could also do a CLFLUSH here to speed up CPU recovery; but
         * that causes hangs on some VIA CPUs.
         */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
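
/*
 * Why two fixmap slots: the patched range may straddle a page boundary.
 * E.g. a 5-byte poke starting 2 bytes before the end of a page touches the
 * last 2 bytes of pages[0] and the first 3 bytes of pages[1]; mapping both
 * pages through the two adjacent fixmap slots lets the single memcpy()
 * above cover the whole range.
 */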

static void do_sync_core(void *info)
{
        sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
        /*
         * Having observed our INT3 instruction, we now must observe
         * bp_patching_in_progress.
         *
         *      in_progress = TRUE              INT3
         *      WMB                             RMB
         *      write INT3                      if (in_progress)
         *
         * Idem for bp_int3_handler.
         */
        smp_rmb();

        if (likely(!bp_patching_in_progress))
                return 0;

        if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
                return 0;

        /* set up the specified breakpoint handler */
        regs->ip = (unsigned long) bp_int3_handler;

        return 1;
}

/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:       address to patch
 * @opcode:     opcode of new instruction
 * @len:        length to copy
 * @handler:    address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *      - add an int3 trap to the address that will be patched
 *      - sync cores
 *      - update all but the first byte of the patched range
 *      - sync cores
 *      - replace the first byte (int3) with the first byte of the
 *        replacement opcode
 *      - sync cores
 *
 * Note: must be called under text_mutex.
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
        unsigned char int3 = 0xcc;

        bp_int3_handler = handler;
        bp_int3_addr = (u8 *)addr + sizeof(int3);
        bp_patching_in_progress = true;
        /*
         * Corresponding read barrier in int3 notifier for making sure the
         * in_progress and handler are correctly ordered wrt. patching.
         */
        smp_wmb();

        text_poke(addr, &int3, sizeof(int3));

        on_each_cpu(do_sync_core, NULL, 1);

        if (len - sizeof(int3) > 0) {
                /* patch all but the first byte */
                text_poke((char *)addr + sizeof(int3),
                          (const char *) opcode + sizeof(int3),
                          len - sizeof(int3));
                /*
                 * According to Intel, this core syncing is very likely
                 * not necessary and we'd be safe even without it. But
                 * better safe than sorry (plus there's not only Intel).
                 */
                on_each_cpu(do_sync_core, NULL, 1);
        }

        /* patch the first byte */
        text_poke(addr, opcode, sizeof(int3));

        on_each_cpu(do_sync_core, NULL, 1);
        /*
         * sync_core() implies an smp_mb() and orders this store against
         * the writing of the new instruction.
         */
        bp_patching_in_progress = false;

        return addr;
}
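
/*
 * Usage sketch (names hypothetical): rewriting a live 5-byte CALL. Any CPU
 * that executes the site mid-patch hits the INT3 and is diverted to the
 * handler by poke_int3_handler() instead of seeing a torn instruction:
 *
 *      u8 insn[5] = { 0xe8, };                         // CALL rel32
 *      *(s32 *)&insn[1] = new_target - (site + 5);     // relative displacement
 *      text_poke_bp(site, insn, 5, resume_handler);
 */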