TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/preempt.h

#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <linux/thread_info.h>

DECLARE_PER_CPU(int, __preempt_count);

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
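
/*
 * Worked example, assuming the generic PREEMPT_NEED_RESCHED value of
 * 0x80000000: PREEMPT_ENABLED is then 0x80000000, so a CPU that may be
 * preempted and has no reschedule pending reads 0 from preempt_count()
 * once the bit is masked off below.
 */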

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
        return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
        raw_cpu_write_4(__preempt_count, pc);
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
        task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */
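
/*
 * Example sequence, again assuming PREEMPT_NEED_RESCHED == 0x80000000:
 *
 *   __preempt_count == 0x80000000    preemption enabled, no resched pending
 *   preempt_disable()              -> 0x80000001
 *   set_preempt_need_resched()     -> 0x00000001  (bit cleared: resched wanted)
 *   preempt_enable() decrement     -> 0x00000000  hits zero, so we reschedule
 */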

static __always_inline void set_preempt_need_resched(void)
{
        raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
        raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
        return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        raw_cpu_add_4(__preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
        raw_cpu_add_4(__preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
        GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
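
/*
 * For example, should_resched(0) is true only when the raw per-cpu value is
 * exactly 0: no preempt_count held and PREEMPT_NEED_RESCHED cleared, i.e. a
 * reschedule is pending. A caller that legitimately holds one preempt
 * reference (such as a held spinlock) passes a preempt_offset of 1 instead.
 */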

#ifdef CONFIG_PREEMPT
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
  extern asmlinkage void preempt_schedule(void);
  extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
  extern asmlinkage void preempt_schedule_notrace(void);
#endif

#endif /* __ASM_PREEMPT_H */
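
Roughly, the generic preemption layer in include/linux/preempt.h composes these
primitives as in the sketch below (simplified; the exact definitions depend on
CONFIG_PREEMPT and the debug options). It shows why __preempt_count_dec_and_test()
folds the need-resched test into the decrement: the whole preempt_enable() fast
path becomes one decrement of the per-cpu counter plus one conditional branch.

#define preempt_disable() \
do { \
        preempt_count_inc(); \
        barrier(); \
} while (0)

#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

In the non-debug case, preempt_count_inc() and preempt_count_dec_and_test() resolve
to __preempt_count_add(1) and __preempt_count_dec_and_test() from this header, so
enabling preemption compiles down to a single decl on the per-cpu __preempt_count,
followed by a call to the ___preempt_schedule thunk only when the counter hits zero.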
