TOMOYO Linux Cross Reference
Linux/include/linux/preempt.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. However,
 * a few palaeontologic drivers re-enable interrupts in the handler,
 * so we need more than one bit here.
 *
 *         PREEMPT_MASK:        0x000000ff
 *         SOFTIRQ_MASK:        0x0000ff00
 *         HARDIRQ_MASK:        0x000f0000
 *             NMI_MASK:        0x00100000
 * PREEMPT_NEED_RESCHED:        0x80000000
 */
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8
#define HARDIRQ_BITS    4
#define NMI_BITS        1

#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)   ((1UL << (x))-1)

#define PREEMPT_MASK    (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK    (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK        (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET  (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET  (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET      (1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)

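/*
 * Worked example (a sketch; the value below is hypothetical, not from
 * the original header): decoding preempt_count() == 0x00010102 with
 * the masks and shifts above gives
 *
 *   (0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1   hardirq depth 1
 *   (0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1   serving a softirq
 *   (0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2   preemption disabled twice
 *
 * i.e. a hardirq interrupted a softirq handler that had itself
 * disabled preemption twice.
 */
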
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED    0x80000000

#define PREEMPT_DISABLED        (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT      PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT      (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

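/*
 * Spelled out (a sketch assuming the generic asm-generic/preempt.h,
 * where PREEMPT_ENABLED is 0): with CONFIG_PREEMPT_COUNT,
 * PREEMPT_DISABLE_OFFSET == PREEMPT_OFFSET == 1, so
 * FORK_PREEMPT_COUNT == 2*1 + 0 == 2; without CONFIG_PREEMPT_COUNT,
 * PREEMPT_DISABLE_OFFSET is 0 and FORK_PREEMPT_COUNT == 0.
 */
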
/* preempt_count() and related functions, which depend on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count()     (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
                                 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()       - We're in (hard) IRQ context
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI, IRQ, or SoftIRQ context, or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()       - We're in NMI context
 * in_task()      - We're in task context
 *
 * Note: due to the BH-disabled confusion, in_softirq() and in_interrupt()
 *       really should not be used in new code.
 */
#define in_irq()                (hardirq_count())
#define in_softirq()            (softirq_count())
#define in_interrupt()          (irq_count())
#define in_serving_softirq()    (softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()                (preempt_count() & NMI_MASK)
#define in_task()               (!(preempt_count() & \
                                   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))

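/*
 * Minimal usage sketch (example_buf_get() is a hypothetical helper,
 * compiled out with #if 0): code that may run in either context picks
 * a non-sleeping allocation mode outside of task context, using
 * in_task() rather than the discouraged in_interrupt().
 */
#if 0
#include <linux/slab.h>

static void *example_buf_get(size_t size)
{
        /* Sleeping (GFP_KERNEL) allocations are only safe in task context. */
        return kmalloc(size, in_task() ? GFP_KERNEL : GFP_ATOMIC);
}
#endif
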
/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET     PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

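/*
 * Spelled out (a sketch for CONFIG_PREEMPT_COUNT kernels):
 *
 *   SOFTIRQ_LOCK_OFFSET == 2*SOFTIRQ_OFFSET + PREEMPT_LOCK_OFFSET
 *                       == 0x200 + 0x1 == 0x201
 *
 * i.e. +0x1 for the preempt-disable part of spin_lock_bh() and +0x200
 * for its bh-disable part.
 */
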
/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()     (preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
        ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)  __preempt_count_add(val)
#define preempt_count_sub(val)  __preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
        preempt_count_inc(); \
        barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()   (preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
        barrier(); \
        if (unlikely(preempt_count_dec_and_test())) \
                __preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        if (unlikely(__preempt_count_dec_and_test())) \
                __preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
        if (should_resched(0)) \
                __preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
        barrier(); \
        preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
        __preempt_count_inc(); \
        barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
        barrier(); \
        __preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that operations like get_user()/put_user(), which
 * can fault and schedule, do not migrate into our preempt-protected
 * region.
 */
#define preempt_disable()                       barrier()
#define sched_preempt_enable_no_resched()       barrier()
#define preempt_enable_no_resched()             barrier()
#define preempt_enable()                        barrier()
#define preempt_check_resched()                 do { } while (0)

#define preempt_disable_notrace()               barrier()
#define preempt_enable_no_resched_notrace()     barrier()
#define preempt_enable_notrace()                barrier()
#define preemptible()                           0

#endif /* CONFIG_PREEMPT_COUNT */

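/*
 * Minimal usage sketch (the per-CPU variable example_counter and
 * example_bump() are hypothetical, compiled out with #if 0):
 * preempt_disable()/preempt_enable() bracket a region that must stay
 * on one CPU, which is exactly what the raw __this_cpu_inc() requires.
 * On !CONFIG_PREEMPT_COUNT kernels the pair compiles down to the bare
 * barrier()s defined above.
 */
#if 0
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_counter);

static void example_bump(void)
{
        preempt_disable();              /* pin this task to the current CPU */
        __this_cpu_inc(example_counter);
        preempt_enable();               /* may reschedule if preemption is due */
}
#endif
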
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
        set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
        if (tif_need_resched()) \
                set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and is depended upon by its users.
 */
struct preempt_ops {
        void (*sched_in)(struct preempt_notifier *notifier, int cpu);
        void (*sched_out)(struct preempt_notifier *notifier,
                          struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
        struct hlist_node link;
        struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
                                     struct preempt_ops *ops)
{
        INIT_HLIST_NODE(&notifier->link);
        notifier->ops = ops;
}

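/*
 * Minimal registration sketch (struct example_ctx and the example_*
 * functions are hypothetical, compiled out with #if 0): the notifier
 * is embedded in a private structure and recovered with container_of(),
 * as the comment above suggests.  preempt_notifier_register() attaches
 * to the current task, and per the preempt_ops comment, sched_out runs
 * with the rq lock held and irqs off, so it must stay short and never
 * sleep.
 */
#if 0
#include <linux/kernel.h>

struct example_ctx {
        struct preempt_notifier notifier;
        int cpu_last_seen;
};

static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct example_ctx *ctx = container_of(pn, struct example_ctx, notifier);

        ctx->cpu_last_seen = cpu;       /* we are being scheduled back in */
}

static void example_sched_out(struct preempt_notifier *pn,
                              struct task_struct *next)
{
        /* rq lock held, irqs disabled: keep this path short and non-sleeping */
}

static struct preempt_ops example_ops = {
        .sched_in  = example_sched_in,
        .sched_out = example_sched_out,
};

static void example_attach(struct example_ctx *ctx)
{
        preempt_notifier_inc();         /* enable the notifier infrastructure */
        preempt_notifier_init(&ctx->notifier, &example_ops);
        preempt_notifier_register(&ctx->notifier);
}
#endif
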
#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */

