TOMOYO Linux Cross Reference
Linux/include/linux/compiler.h


#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __kernel       __attribute__((address_space(0)))
# define __safe         __attribute__((safe))
# define __force        __attribute__((force))
# define __nocast       __attribute__((nocast))
# define __iomem        __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x)  __attribute__((context(x,0,1)))
# define __releases(x)  __attribute__((context(x,1,0)))
# define __acquire(x)   __context__(x,1)
# define __release(x)   __context__(x,-1)
# define __cond_lock(x,c)       ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu       __attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu          __attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
#endif

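/*
 * Illustrative usage sketch (not part of the original header): these sparse
 * annotations are attached to declarations so that `sparse` can flag direct
 * dereferences of user-space pointers and unbalanced lock contexts, e.g.:
 *
 *      int read_flag_from_user(int __user *uptr, int *kval)
 *      {
 *              if (get_user(*kval, uptr))      // never dereference uptr directly
 *                      return -EFAULT;
 *              return 0;
 *      }
 *
 *      void foo_lock(struct foo *f) __acquires(&f->lock);     // hypothetical lock helpers
 *      void foo_unlock(struct foo *f) __releases(&f->lock);
 */
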
/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

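/*
 * Example: __PASTE(foo_, __LINE__) expands __LINE__ first and then pastes,
 * giving e.g. foo_123, whereas pasting directly with foo_##__LINE__ would
 * produce the literal token foo___LINE__.
 */
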
#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#define notrace __attribute__((no_instrument_function))

/*
 * The Intel compiler also defines __GNUC__, so the implementations coming
 * from the header files above are overridden here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go below
 * this comment.  Actual compiler- and compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({                                  \
                        int ______r;                                    \
                        static struct ftrace_branch_data                \
                                __attribute__((__aligned__(4)))         \
                                __attribute__((section("_ftrace_annotated_branch"))) \
                                ______f = {                             \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                        ______r = likely_notrace(x);                    \
                        ftrace_likely_update(&______f, ______r, expect); \
                        ______r;                                        \
                })

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)   (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

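/*
 * Illustrative usage sketch (not part of the original header): callers wrap
 * the whole condition, e.g.
 *
 *      if (unlikely(!buf))
 *              return -ENOMEM;
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, __branch_check__() above also
 * records how often the annotated prediction was correct in the
 * _ftrace_annotated_branch section.
 */
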
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p(!!(cond)) ? !!(cond) :                 \
        ({                                                              \
                int ______r;                                            \
                static struct ftrace_branch_data                        \
                        __attribute__((__aligned__(4)))                 \
                        __attribute__((section("_ftrace_branch")))      \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                ______r = !!(cond);                                     \
                ______f.miss_hit[______r]++;                            \
                ______r;                                                \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

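/*
 * Example: on line 80 of a file, __UNIQUE_ID(foo) expands via the __PASTE()
 * helpers above to __UNIQUE_ID_foo80 -- unique within a translation unit as
 * long as the macro is not used twice on the same line, hence "not quite
 * unique".
 */
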
#include <uapi/linux/types.h>

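/*
 * Size-dispatched helpers behind READ_ONCE()/WRITE_ONCE(): accesses of
 * native word sizes stay single volatile loads/stores; anything else falls
 * back to a byte-wise memcpy between compiler barriers.
 */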
static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
        default:
                barrier();
                __builtin_memcpy((void *)res, (const void *)p, size);
                barrier();
        }
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy().
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
        ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
        ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })

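/*
 * Illustrative usage sketch (not part of the original header): a flag shared
 * between process context and an interrupt handler on the same CPU can be
 * accessed with WRITE_ONCE()/READ_ONCE() so the compiler neither tears,
 * refetches, nor caches the access:
 *
 *      static int wakeup_pending;                      // hypothetical flag
 *
 *      static irqreturn_t foo_irq(int irq, void *dev)  // hypothetical handler
 *      {
 *              WRITE_ONCE(wakeup_pending, 1);
 *              return IRQ_HANDLED;
 *      }
 *
 *      // polled from process context on the same CPU:
 *      while (!READ_ONCE(wakeup_pending))
 *              cpu_relax();
 */
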
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *              int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated           /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used                 /* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused         /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused        /* unimplemented */
#endif

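/*
 * Illustrative sketch (not part of the original header): a function that is
 * called only from inline assembly is invisible to gcc's reference tracking,
 * so it is marked __used to keep it in the object file:
 *
 *      static void __used asm_only_callback(void)      // hypothetical helper
 *      {
 *              do_something();                         // hypothetical body
 *      }
 */
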
#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just a slightly stricter class than the `pure' attribute above,
 * since the function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__    /* unimplemented */
#endif

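/*
 * Illustrative sketch (not part of the original header): a purely arithmetic
 * helper that only looks at its argument qualifies as `const':
 *
 *      static unsigned int __attribute_const__ next_pow2_example(unsigned int n)
 *      {
 *              unsigned int p = 1;             // hypothetical helper
 *
 *              while (p < n)
 *                      p <<= 1;
 *              return p;
 *      }
 */
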
/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

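/*
 * Illustrative sketch (not part of the original header): __same_type() is
 * typically used in build-time type checks; for instance, an array-only
 * macro can reject plain pointers because an array and the address of its
 * first element have different types:
 *
 *      #define must_be_array_example(a) \
 *              BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 */
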
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
# define __compiletime_error_fallback(condition) \
        do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)            \
        do {                                                            \
                bool __cond = !(condition);                             \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (__cond)                                             \
                        prefix ## suffix();                             \
                __compiletime_error_fallback(__cond);                   \
        } while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)                               \
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")

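/*
 * Illustrative sketch (not part of the original header):
 *
 *      compiletime_assert(sizeof(struct foo_hdr) == 16,        // hypothetical struct
 *                         "struct foo_hdr must stay 16 bytes");
 *
 * breaks the build whenever the constant condition is false, either via the
 * __compiletime_error() declaration above or, on compilers without that
 * support, via the negative-sized-array fallback.
 */
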
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
        typeof(p) _________p1 = ACCESS_ONCE(p); \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
})

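/*
 * Illustrative sketch (not part of the original header): publishing and
 * consuming a pointer whose lifetime is managed by reference counting
 * rather than RCU:
 *
 *      // writer (hypothetical names)
 *      p = kmalloc(sizeof(*p), GFP_KERNEL);
 *      foo_init(p);
 *      smp_wmb();                      // order initialization before publication
 *      ACCESS_ONCE(global_foo) = p;
 *
 *      // reader
 *      q = lockless_dereference(global_foo);
 *      if (q)
 *              foo_use(q);
 */
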
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes      __attribute__((__section__(".kprobes.text")))
#else
# define __kprobes
#endif
#endif /* __LINUX_COMPILER_H */

