TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/process.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Based on arch/arm/kernel/process.c
  4  *
  5  * Original Copyright (C) 1995  Linus Torvalds
  6  * Copyright (C) 1996-2000 Russell King - Converted to ARM.
  7  * Copyright (C) 2012 ARM Ltd.
  8  */
  9 
 10 #include <stdarg.h>
 11 
 12 #include <linux/compat.h>
 13 #include <linux/efi.h>
 14 #include <linux/export.h>
 15 #include <linux/sched.h>
 16 #include <linux/sched/debug.h>
 17 #include <linux/sched/task.h>
 18 #include <linux/sched/task_stack.h>
 19 #include <linux/kernel.h>
 20 #include <linux/lockdep.h>
 21 #include <linux/mm.h>
 22 #include <linux/stddef.h>
 23 #include <linux/sysctl.h>
 24 #include <linux/unistd.h>
 25 #include <linux/user.h>
 26 #include <linux/delay.h>
 27 #include <linux/reboot.h>
 28 #include <linux/interrupt.h>
 29 #include <linux/init.h>
 30 #include <linux/cpu.h>
 31 #include <linux/elfcore.h>
 32 #include <linux/pm.h>
 33 #include <linux/tick.h>
 34 #include <linux/utsname.h>
 35 #include <linux/uaccess.h>
 36 #include <linux/random.h>
 37 #include <linux/hw_breakpoint.h>
 38 #include <linux/personality.h>
 39 #include <linux/notifier.h>
 40 #include <trace/events/power.h>
 41 #include <linux/percpu.h>
 42 #include <linux/thread_info.h>
 43 #include <linux/prctl.h>
 44 
 45 #include <asm/alternative.h>
 46 #include <asm/arch_gicv3.h>
 47 #include <asm/compat.h>
 48 #include <asm/cpufeature.h>
 49 #include <asm/cacheflush.h>
 50 #include <asm/exec.h>
 51 #include <asm/fpsimd.h>
 52 #include <asm/mmu_context.h>
 53 #include <asm/processor.h>
 54 #include <asm/pointer_auth.h>
 55 #include <asm/stacktrace.h>
 56 
 57 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 58 #include <linux/stackprotector.h>
 59 unsigned long __stack_chk_guard __read_mostly;
 60 EXPORT_SYMBOL(__stack_chk_guard);
 61 #endif
 62 
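A hedged illustration of why this symbol must be defined and exported: with CONFIG_STACKPROTECTOR, the compiler instruments functions roughly as sketched below, referencing __stack_chk_guard and calling __stack_chk_fail() on corruption. The function and buffer names here are illustrative only, not part of this file.

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

void example_fn(void)
{
        /* Prologue inserted by the compiler: stash the canary. */
        unsigned long canary = __stack_chk_guard;
        char buf[64];

        /* ... function body that might overflow buf ... */
        (void)buf;

        /* Epilogue: a changed canary means the stack was smashed. */
        if (canary != __stack_chk_guard)
                __stack_chk_fail();
}
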
 63 /*
 64  * Function pointers to optional machine specific functions
 65  */
 66 void (*pm_power_off)(void);
 67 EXPORT_SYMBOL_GPL(pm_power_off);
 68 
 69 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 70 
 71 static void __cpu_do_idle(void)
 72 {
 73         dsb(sy);
 74         wfi();
 75 }
 76 
 77 static void __cpu_do_idle_irqprio(void)
 78 {
 79         unsigned long pmr;
 80         unsigned long daif_bits;
 81 
 82         daif_bits = read_sysreg(daif);
 83         write_sysreg(daif_bits | PSR_I_BIT, daif);
 84 
 85         /*
 86          * Unmask PMR before going idle to make sure interrupts can
 87          * be raised.
 88          */
 89         pmr = gic_read_pmr();
 90         gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 91 
 92         __cpu_do_idle();
 93 
 94         gic_write_pmr(pmr);
 95         write_sysreg(daif_bits, daif);
 96 }
 97 
 98 /*
 99  *      cpu_do_idle()
100  *
101  *      Idle the processor (wait for interrupt).
102  *
103  *      If the CPU supports priority masking we must do additional work to
104  *      ensure that interrupts are not masked at the PMR (because the core will
105  *      not wake up if we block the wake up signal in the interrupt controller).
106  */
107 void cpu_do_idle(void)
108 {
109         if (system_uses_irq_prio_masking())
110                 __cpu_do_idle_irqprio();
111         else
112                 __cpu_do_idle();
113 }
114 
115 /*
116  * This is our default idle handler.
117  */
118 void arch_cpu_idle(void)
119 {
120         /*
121          * This should do all the clock switching and wait for interrupt
122          * tricks
123          */
124         trace_cpu_idle_rcuidle(1, smp_processor_id());
125         cpu_do_idle();
126         local_irq_enable();
127         trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
128 }
129 
130 #ifdef CONFIG_HOTPLUG_CPU
131 void arch_cpu_idle_dead(void)
132 {
 133         cpu_die();
134 }
135 #endif
136 
137 /*
138  * Called by kexec, immediately prior to machine_kexec().
139  *
140  * This must completely disable all secondary CPUs; simply causing those CPUs
141  * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
142  * kexec'd kernel to use any and all RAM as it sees fit, without having to
143  * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 144  * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
145  */
146 void machine_shutdown(void)
147 {
148         smp_shutdown_nonboot_cpus(reboot_cpu);
149 }
150 
151 /*
152  * Halting simply requires that the secondary CPUs stop performing any
153  * activity (executing tasks, handling interrupts). smp_send_stop()
154  * achieves this.
155  */
156 void machine_halt(void)
157 {
158         local_irq_disable();
159         smp_send_stop();
160         while (1);
161 }
162 
163 /*
164  * Power-off simply requires that the secondary CPUs stop performing any
165  * activity (executing tasks, handling interrupts). smp_send_stop()
166  * achieves this. When the system power is turned off, it will take all CPUs
167  * with it.
168  */
169 void machine_power_off(void)
170 {
171         local_irq_disable();
172         smp_send_stop();
173         if (pm_power_off)
174                 pm_power_off();
175 }
176 
177 /*
178  * Restart requires that the secondary CPUs stop performing any activity
179  * while the primary CPU resets the system. Systems with multiple CPUs must
180  * provide a HW restart implementation, to ensure that all CPUs reset at once.
181  * This is required so that any code running after reset on the primary CPU
182  * doesn't have to co-ordinate with other CPUs to ensure they aren't still
183  * executing pre-reset code, and using RAM that the primary CPU's code wishes
184  * to use. Implementing such co-ordination would be essentially impossible.
185  */
186 void machine_restart(char *cmd)
187 {
188         /* Disable interrupts first */
189         local_irq_disable();
190         smp_send_stop();
191 
192         /*
193          * UpdateCapsule() depends on the system being reset via
194          * ResetSystem().
195          */
196         if (efi_enabled(EFI_RUNTIME_SERVICES))
197                 efi_reboot(reboot_mode, NULL);
198 
199         /* Now call the architecture specific reboot code. */
200         if (arm_pm_restart)
201                 arm_pm_restart(reboot_mode, cmd);
202         else
203                 do_kernel_restart(cmd);
204 
205         /*
206          * Whoops - the architecture was unable to reboot.
207          */
208         printk("Reboot failed -- System halted\n");
209         while (1);
210 }
211 
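For context, a hedged sketch of where machine_restart()'s cmd string can originate: the RESTART2 variant of the reboot(2) syscall. The constants come from linux/reboot.h; actually running this requires CAP_SYS_BOOT, and "bootloader" is only an illustrative argument.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
        /* Needs CAP_SYS_BOOT; shown only to trace where `cmd` comes from. */
        return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
                       LINUX_REBOOT_CMD_RESTART2, "bootloader");
}
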
212 static void print_pstate(struct pt_regs *regs)
213 {
214         u64 pstate = regs->pstate;
215 
216         if (compat_user_mode(regs)) {
217                 printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
218                         pstate,
219                         pstate & PSR_AA32_N_BIT ? 'N' : 'n',
220                         pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
221                         pstate & PSR_AA32_C_BIT ? 'C' : 'c',
222                         pstate & PSR_AA32_V_BIT ? 'V' : 'v',
223                         pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
224                         pstate & PSR_AA32_T_BIT ? "T32" : "A32",
225                         pstate & PSR_AA32_E_BIT ? "BE" : "LE",
226                         pstate & PSR_AA32_A_BIT ? 'A' : 'a',
227                         pstate & PSR_AA32_I_BIT ? 'I' : 'i',
228                         pstate & PSR_AA32_F_BIT ? 'F' : 'f');
229         } else {
230                 printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
231                         pstate,
232                         pstate & PSR_N_BIT ? 'N' : 'n',
233                         pstate & PSR_Z_BIT ? 'Z' : 'z',
234                         pstate & PSR_C_BIT ? 'C' : 'c',
235                         pstate & PSR_V_BIT ? 'V' : 'v',
236                         pstate & PSR_D_BIT ? 'D' : 'd',
237                         pstate & PSR_A_BIT ? 'A' : 'a',
238                         pstate & PSR_I_BIT ? 'I' : 'i',
239                         pstate & PSR_F_BIT ? 'F' : 'f',
240                         pstate & PSR_PAN_BIT ? '+' : '-',
241                         pstate & PSR_UAO_BIT ? '+' : '-');
242         }
243 }
244 
245 void __show_regs(struct pt_regs *regs)
246 {
247         int i, top_reg;
248         u64 lr, sp;
249 
250         if (compat_user_mode(regs)) {
251                 lr = regs->compat_lr;
252                 sp = regs->compat_sp;
253                 top_reg = 12;
254         } else {
255                 lr = regs->regs[30];
256                 sp = regs->sp;
257                 top_reg = 29;
258         }
259 
260         show_regs_print_info(KERN_DEFAULT);
261         print_pstate(regs);
262 
263         if (!user_mode(regs)) {
264                 printk("pc : %pS\n", (void *)regs->pc);
265                 printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
266         } else {
267                 printk("pc : %016llx\n", regs->pc);
268                 printk("lr : %016llx\n", lr);
269         }
270 
271         printk("sp : %016llx\n", sp);
272 
273         if (system_uses_irq_prio_masking())
274                 printk("pmr_save: %08llx\n", regs->pmr_save);
275 
276         i = top_reg;
277 
278         while (i >= 0) {
279                 printk("x%-2d: %016llx ", i, regs->regs[i]);
280                 i--;
281 
282                 if (i % 2 == 0) {
283                         pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
284                         i--;
285                 }
286 
287                 pr_cont("\n");
288         }
289 }
290 
 291 void show_regs(struct pt_regs *regs)
292 {
293         __show_regs(regs);
294         dump_backtrace(regs, NULL);
295 }
296 
297 static void tls_thread_flush(void)
298 {
299         write_sysreg(0, tpidr_el0);
300 
301         if (is_compat_task()) {
302                 current->thread.uw.tp_value = 0;
303 
304                 /*
305                  * We need to ensure ordering between the shadow state and the
306                  * hardware state, so that we don't corrupt the hardware state
307                  * with a stale shadow state during context switch.
308                  */
309                 barrier();
310                 write_sysreg(0, tpidrro_el0);
311         }
312 }
313 
314 static void flush_tagged_addr_state(void)
315 {
316         if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
317                 clear_thread_flag(TIF_TAGGED_ADDR);
318 }
319 
320 void flush_thread(void)
321 {
322         fpsimd_flush_thread();
323         tls_thread_flush();
324         flush_ptrace_hw_breakpoint(current);
325         flush_tagged_addr_state();
326 }
327 
328 void release_thread(struct task_struct *dead_task)
329 {
330 }
331 
332 void arch_release_task_struct(struct task_struct *tsk)
333 {
334         fpsimd_release_task(tsk);
335 }
336 
337 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
338 {
339         if (current->mm)
340                 fpsimd_preserve_current_state();
341         *dst = *src;
342 
343         /* We rely on the above assignment to initialize dst's thread_flags: */
344         BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
345 
346         /*
347          * Detach src's sve_state (if any) from dst so that it does not
348          * get erroneously used or freed prematurely.  dst's sve_state
349          * will be allocated on demand later on if dst uses SVE.
350          * For consistency, also clear TIF_SVE here: this could be done
351          * later in copy_process(), but to avoid tripping up future
352          * maintainers it is best not to leave TIF_SVE and sve_state in
353          * an inconsistent state, even temporarily.
354          */
355         dst->thread.sve_state = NULL;
356         clear_tsk_thread_flag(dst, TIF_SVE);
357 
358         return 0;
359 }
360 
361 asmlinkage void ret_from_fork(void) asm("ret_from_fork");
362 
363 int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
364                 unsigned long stk_sz, struct task_struct *p, unsigned long tls)
365 {
366         struct pt_regs *childregs = task_pt_regs(p);
367 
368         memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
369 
370         /*
371          * In case p was allocated the same task_struct pointer as some
372          * other recently-exited task, make sure p is disassociated from
373          * any cpu that may have run that now-exited task recently.
374          * Otherwise we could erroneously skip reloading the FPSIMD
375          * registers for p.
376          */
377         fpsimd_flush_task_state(p);
378 
379         ptrauth_thread_init_kernel(p);
380 
381         if (likely(!(p->flags & PF_KTHREAD))) {
382                 *childregs = *current_pt_regs();
383                 childregs->regs[0] = 0;
384 
385                 /*
386                  * Read the current TLS pointer from tpidr_el0 as it may be
387                  * out-of-sync with the saved value.
388                  */
389                 *task_user_tls(p) = read_sysreg(tpidr_el0);
390 
391                 if (stack_start) {
392                         if (is_compat_thread(task_thread_info(p)))
393                                 childregs->compat_sp = stack_start;
394                         else
395                                 childregs->sp = stack_start;
396                 }
397 
398                 /*
399                  * If a TLS pointer was passed to clone, use it for the new
400                  * thread.
401                  */
402                 if (clone_flags & CLONE_SETTLS)
403                         p->thread.uw.tp_value = tls;
404         } else {
405                 memset(childregs, 0, sizeof(struct pt_regs));
406                 childregs->pstate = PSR_MODE_EL1h;
407                 if (IS_ENABLED(CONFIG_ARM64_UAO) &&
408                     cpus_have_const_cap(ARM64_HAS_UAO))
409                         childregs->pstate |= PSR_UAO_BIT;
410 
411                 if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
412                         set_ssbs_bit(childregs);
413 
414                 if (system_uses_irq_prio_masking())
415                         childregs->pmr_save = GIC_PRIO_IRQON;
416 
417                 p->thread.cpu_context.x19 = stack_start;
418                 p->thread.cpu_context.x20 = stk_sz;
419         }
420         p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
421         p->thread.cpu_context.sp = (unsigned long)childregs;
422 
423         ptrace_hw_copy_thread(p);
424 
425         return 0;
426 }
427 
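A hedged userspace sketch of how the tls argument handled above is supplied: glibc's clone() wrapper passes a TLS pointer when CLONE_SETTLS is set, and copy_thread_tls() stores it in p->thread.uw.tp_value (loaded into tpidr_el0 on switch). The stack size, flag combination, and child_fn below are illustrative assumptions, not from this file.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static char tls_block[4096];            /* illustrative TLS area */

static int child_fn(void *arg)
{
        /* On arm64, tpidr_el0 now points at tls_block (via CLONE_SETTLS). */
        return 0;
}

int main(void)
{
        size_t stack_sz = 64 * 1024;
        char *stack = malloc(stack_sz);

        /* glibc clone() takes the *top* of the child stack on arm64. */
        int pid = clone(child_fn, stack + stack_sz,
                        CLONE_VM | CLONE_SETTLS | SIGCHLD,
                        NULL, NULL, tls_block, NULL);
        printf("child pid: %d\n", pid);
        return 0;
}
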
428 void tls_preserve_current_state(void)
429 {
430         *task_user_tls(current) = read_sysreg(tpidr_el0);
431 }
432 
433 static void tls_thread_switch(struct task_struct *next)
434 {
435         tls_preserve_current_state();
436 
437         if (is_compat_thread(task_thread_info(next)))
438                 write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
439         else if (!arm64_kernel_unmapped_at_el0())
440                 write_sysreg(0, tpidrro_el0);
441 
442         write_sysreg(*task_user_tls(next), tpidr_el0);
443 }
444 
445 /* Restore the UAO state depending on next's addr_limit */
446 void uao_thread_switch(struct task_struct *next)
447 {
448         if (IS_ENABLED(CONFIG_ARM64_UAO)) {
449                 if (task_thread_info(next)->addr_limit == KERNEL_DS)
450                         asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
451                 else
452                         asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
453         }
454 }
455 
456 /*
457  * Force SSBS state on context-switch, since it may be lost after migrating
458  * from a CPU which treats the bit as RES0 in a heterogeneous system.
459  */
460 static void ssbs_thread_switch(struct task_struct *next)
461 {
462         struct pt_regs *regs = task_pt_regs(next);
463 
464         /*
465          * Nothing to do for kernel threads, but 'regs' may be junk
466          * (e.g. idle task) so check the flags and bail early.
467          */
468         if (unlikely(next->flags & PF_KTHREAD))
469                 return;
470 
471         /*
472          * If all CPUs implement the SSBS extension, then we just need to
473          * context-switch the PSTATE field.
474          */
475         if (cpu_have_feature(cpu_feature(SSBS)))
476                 return;
477 
478         /* If the mitigation is enabled, then we leave SSBS clear. */
479         if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
480             test_tsk_thread_flag(next, TIF_SSBD))
481                 return;
482 
483         if (compat_user_mode(regs))
484                 set_compat_ssbs_bit(regs);
485         else if (user_mode(regs))
486                 set_ssbs_bit(regs);
487 }
488 
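A hedged sketch of how TIF_SSBD, tested above, typically gets set: via the speculation-control prctl() (see Documentation/userspace-api/spec_ctrl.rst). Exact return values depend on the CPU and on the kernel's SSBD mitigation mode; error handling here is minimal.

#include <stdio.h>
#include <sys/prctl.h>  /* glibc pulls in linux/prctl.h for PR_SPEC_* */

int main(void)
{
        /* Request the Speculative Store Bypass mitigation for this task;
         * in the kernel's "prctl" SSBD mode this sets TIF_SSBD. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        printf("ssb state: 0x%lx\n",
               (unsigned long)prctl(PR_GET_SPECULATION_CTRL,
                                    PR_SPEC_STORE_BYPASS, 0, 0, 0));
        return 0;
}
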
489 /*
490  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
491  * shadow copy so that we can restore this upon entry from userspace.
492  *
493  * This is *only* for exception entry from EL0, and is not valid until we
494  * __switch_to() a user task.
495  */
496 DEFINE_PER_CPU(struct task_struct *, __entry_task);
497 
498 static void entry_task_switch(struct task_struct *next)
499 {
500         __this_cpu_write(__entry_task, next);
501 }
502 
503 /*
504  * Thread switching.
505  */
506 __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
507                                 struct task_struct *next)
508 {
509         struct task_struct *last;
510 
511         fpsimd_thread_switch(next);
512         tls_thread_switch(next);
513         hw_breakpoint_thread_switch(next);
514         contextidr_thread_switch(next);
515         entry_task_switch(next);
516         uao_thread_switch(next);
517         ssbs_thread_switch(next);
518 
519         /*
520          * Complete any pending TLB or cache maintenance on this CPU in case
521          * the thread migrates to a different CPU.
522          * This full barrier is also required by the membarrier system
523          * call.
524          */
525         dsb(ish);
526 
527         /* the actual thread switch */
528         last = cpu_switch_to(prev, next);
529 
530         return last;
531 }
532 
533 unsigned long get_wchan(struct task_struct *p)
534 {
535         struct stackframe frame;
536         unsigned long stack_page, ret = 0;
537         int count = 0;
538         if (!p || p == current || p->state == TASK_RUNNING)
539                 return 0;
540 
541         stack_page = (unsigned long)try_get_task_stack(p);
542         if (!stack_page)
543                 return 0;
544 
545         start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
546 
547         do {
548                 if (unwind_frame(p, &frame))
549                         goto out;
550                 if (!in_sched_functions(frame.pc)) {
551                         ret = frame.pc;
552                         goto out;
553                 }
 554         } while (count++ < 16);
555 
556 out:
557         put_task_stack(p);
558         return ret;
559 }
560 
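For reference, get_wchan() is what backs /proc/<pid>/wchan; a minimal hedged reader follows. Pid 1 is only an illustrative target, and the file reads empty for tasks that are currently running.

#include <stdio.h>

int main(void)
{
        char buf[128] = "";
        FILE *f = fopen("/proc/1/wchan", "r");

        if (f) {
                if (fgets(buf, sizeof(buf), f))
                        printf("pid 1 wchan: %s\n", buf);
                fclose(f);
        }
        return 0;
}
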
561 unsigned long arch_align_stack(unsigned long sp)
562 {
563         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
564                 sp -= get_random_int() & ~PAGE_MASK;
565         return sp & ~0xf;
566 }
567 
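A worked illustration of the arithmetic above, assuming 4 KiB pages (so the kernel's PAGE_MASK is ~0xfffUL): the random value is masked down to an in-page offset (0..4095), subtracted from sp, and the result is rounded down to the 16-byte SP alignment AAPCS64 requires. page_mask and the sample values below stand in for the kernel macro and get_random_int().

#include <stdio.h>

int main(void)
{
        unsigned long page_mask = ~0xfffUL;     /* kernel PAGE_MASK, 4 KiB pages */
        unsigned long sp = 0x0000ffffdeadbeefUL;
        unsigned long rnd = 0x12345678UL;       /* stand-in for get_random_int() */

        sp -= rnd & ~page_mask;                 /* subtract 0x678 (range 0..4095) */
        sp &= ~0xfUL;                           /* round down to 16 bytes */
        printf("sp = %#lx\n", sp);              /* prints 0xffffdeadb870 */
        return 0;
}
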
568 /*
569  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
570  */
571 void arch_setup_new_exec(void)
572 {
573         current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
574 
575         ptrauth_thread_init_user(current);
576 }
577 
578 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
579 /*
580  * Control the relaxed ABI allowing tagged user addresses into the kernel.
581  */
582 static unsigned int tagged_addr_disabled;
583 
584 long set_tagged_addr_ctrl(unsigned long arg)
585 {
586         if (is_compat_task())
587                 return -EINVAL;
588         if (arg & ~PR_TAGGED_ADDR_ENABLE)
589                 return -EINVAL;
590 
591         /*
592          * Do not allow the enabling of the tagged address ABI if globally
593          * disabled via sysctl abi.tagged_addr_disabled.
594          */
595         if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
596                 return -EINVAL;
597 
598         update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
599 
600         return 0;
601 }
602 
603 long get_tagged_addr_ctrl(void)
604 {
605         if (is_compat_task())
606                 return -EINVAL;
607 
608         if (test_thread_flag(TIF_TAGGED_ADDR))
609                 return PR_TAGGED_ADDR_ENABLE;
610 
611         return 0;
612 }
613 
614 /*
615  * Global sysctl to disable the tagged user addresses support. This control
616  * only prevents the tagged address ABI enabling via prctl() and does not
617  * disable it for tasks that already opted in to the relaxed ABI.
618  */
619 
620 static struct ctl_table tagged_addr_sysctl_table[] = {
621         {
622                 .procname       = "tagged_addr_disabled",
623                 .mode           = 0644,
624                 .data           = &tagged_addr_disabled,
625                 .maxlen         = sizeof(int),
626                 .proc_handler   = proc_dointvec_minmax,
627                 .extra1         = SYSCTL_ZERO,
628                 .extra2         = SYSCTL_ONE,
629         },
630         { }
631 };
632 
633 static int __init tagged_addr_init(void)
634 {
635         if (!register_sysctl("abi", tagged_addr_sysctl_table))
636                 return -EINVAL;
637         return 0;
638 }
639 
640 core_initcall(tagged_addr_init);
641 #endif  /* CONFIG_ARM64_TAGGED_ADDR_ABI */
642 
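A hedged sketch of how a task opts in to the tagged address ABI implemented above, using the PR_SET_TAGGED_ADDR_CTRL/PR_GET_TAGGED_ADDR_CTRL prctls from linux/prctl.h. The call fails with EINVAL for compat tasks or once abi.tagged_addr_disabled has been set to 1 via sysctl.

#include <stdio.h>
#include <sys/prctl.h>  /* PR_{SET,GET}_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE */

int main(void)
{
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }

        /* The kernel now ignores the top byte of user pointers at syscalls. */
        printf("tagged addr ctrl: %ld\n",
               (long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
        return 0;
}
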
643 asmlinkage void __sched arm64_preempt_schedule_irq(void)
644 {
645         lockdep_assert_irqs_disabled();
646 
647         /*
648          * Preempting a task from an IRQ means we leave copies of PSTATE
649          * on the stack. cpufeature's enable calls may modify PSTATE, but
650          * resuming one of these preempted tasks would undo those changes.
651          *
652          * Only allow a task to be preempted once cpufeatures have been
653          * enabled.
654          */
655         if (system_capabilities_finalized())
656                 preempt_schedule_irq();
657 }
658 
