TOMOYO Linux Cross Reference
Linux/include/linux/compiler.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
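/*
 * A minimal usage sketch (illustrative, not part of this header): a .c
 * file containing such low-level code can opt out of branch tracing by
 * defining the symbol before any includes:
 *
 *      #define DISABLE_BRANCH_PROFILING
 *      #include <linux/compiler.h>
 */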
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({                     \
                        long ______r;                                   \
                        static struct ftrace_likely_data                \
                                __aligned(4)                            \
                                __section("_ftrace_annotated_branch")   \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
                                .data.line = __LINE__,                  \
                        };                                              \
                        ______r = __builtin_expect(!!(x), expect);      \
                        ftrace_likely_update(&______f, ______r,         \
                                             expect, is_constant);      \
                        ______r;                                        \
                })

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)   (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
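/*
 * A minimal usage sketch (illustrative, not part of this header):
 * likely()/unlikely() only hint at the expected branch direction so the
 * compiler can lay out the hot path; semantics are unchanged either way.
 *
 *      if (unlikely(!ptr))
 *              return -ENOMEM;
 */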

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p(!!(cond)) ? !!(cond) :                 \
        ({                                                              \
                int ______r;                                            \
                static struct ftrace_branch_data                        \
                        __aligned(4)                                    \
                        __section("_ftrace_branch")                     \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                ______r = !!(cond);                                     \
                ______f.miss_hit[______r]++;                            \
                ______r;                                                \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
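/*
 * A minimal usage sketch (illustrative, not part of this header):
 * barrier() stops the compiler from caching values across the call,
 * e.g. when a flag is changed behind the compiler's back.
 *
 *      while (!done)           // 'done' set from an interrupt handler
 *              barrier();      // force 'done' to be re-read each pass
 *
 * barrier_data(ptr) additionally tells the compiler that the data
 * behind 'ptr' is "used", so a preceding memset() cannot be optimized
 * away; memzero_explicit() relies on this.
 */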

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({                                         \
        asm volatile("ANNOTATE_REACHABLE counter=%c0"                   \
                     : : "i" (__COUNTER__));                            \
})
#define annotate_unreachable() ({                                       \
        asm volatile("ANNOTATE_UNREACHABLE counter=%c0"                 \
                     : : "i" (__COUNTER__));                            \
})
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {             \
        annotate_unreachable();         \
        __builtin_unreachable();        \
} while (0)
#endif
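/*
 * A minimal usage sketch (illustrative, not part of this header):
 * unreachable() documents a spot that control flow can never reach,
 * which also silences "control reaches end of non-void function"
 * warnings.
 *
 *      switch (mode & 3) {
 *      case 0: return "a";
 *      case 1: return "b";
 *      case 2: return "c";
 *      case 3: return "d";
 *      }
 *      unreachable();  // all four possible values handled above
 */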

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)                                            \
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
        __section("___kentry" "+" #sym )                        \
        = (unsigned long)&sym;
#endif
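/*
 * A minimal usage sketch (illustrative, not part of this header):
 * keep a handler alive even though no C code references it; the name
 * 'my_vector_entry' is hypothetical.
 *
 *      void my_vector_entry(void);     // referenced only from asm
 *      KENTRY(my_vector_entry);        // pins the symbol for the linker
 */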

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif
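/*
 * Hedged note: RELOC_HIDE() performs the addition through an unsigned
 * long, so the compiler cannot assume that (ptr + off) still points
 * into the same object as 'ptr'; the per-CPU pointer arithmetic is a
 * typical user of this property.
 */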

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE                                                \
({                                                                      \
        switch (size) {                                                 \
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
        default:                                                        \
                barrier();                                              \
                __builtin_memcpy((void *)res, (const void *)p, size);   \
                barrier();                                              \
        }                                                               \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one where the macro copies the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)                                           \
({                                                                      \
        union { typeof(x) __val; char __c[1]; } __u;                    \
        if (check)                                                      \
                __read_once_size(&(x), __u.__c, sizeof(x));             \
        else                                                            \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
        smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
        __u.__val;                                                      \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
        kasan_check_read(addr, 1);
        return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({                                                      \
        union { typeof(x) __val; char __c[1]; } __u =   \
                { .__val = (__force typeof(x)) (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x));    \
        __u.__val;                                      \
})
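/*
 * A minimal usage sketch (illustrative, not part of this header): a
 * flag shared between process context and an interrupt handler; the
 * names are hypothetical.
 *
 *      static int shared_flag;
 *
 *      // writer (e.g. in an irq handler)
 *      WRITE_ONCE(shared_flag, 1);     // single, untorn store
 *
 *      // reader (process context)
 *      while (!READ_ONCE(shared_flag)) // fresh load on every pass
 *              cpu_relax();
 *
 * With plain accesses, the compiler could legally fuse, tear, or hoist
 * the load out of the loop.
 */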

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
        static void * __section(".discard.addressable") __used \
                __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
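/*
 * A minimal usage sketch (illustrative, not part of this header):
 * 'my_func' is hypothetical; the macro plants a pointer to it in a
 * discarded section, so the compiler must keep the symbol around for
 * later references from inline asm.
 *
 *      static void my_func(void) { }
 *      __ADDRESSABLE(my_func);
 *      // "my_func" can now safely be named from inline assembler
 */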

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:        the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
        return (void *)((unsigned long)off + *off);
}
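/*
 * A minimal worked example (illustrative, not part of this header):
 * with a 32-bit self-relative offset stored at address A, the target is
 * A + *A. So if an 'int' at address 0x1000 holds the value 0x20,
 * offset_to_ptr() returns (void *)0x1020. This is the pattern behind
 * position-independent tables of "pointers" that need no relocation.
 */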

#else /* __ASSEMBLY__ */

#ifdef __KERNEL__
#ifndef LINKER_SCRIPT

#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
        .pushsection .discard.unreachable
        .long \counter\()b -.
        .popsection
.endm

.macro ANNOTATE_REACHABLE counter:req
\counter:
        .pushsection .discard.reachable
        .long \counter\()b -.
        .popsection
.endm

.macro ASM_UNREACHABLE
999:
        .pushsection .discard.unreachable
        .long 999b - .
        .popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm

.macro ANNOTATE_REACHABLE counter:req
.endm

.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */

#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)           \
        do {                                                            \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (!(condition))                                       \
                        prefix ## suffix();                             \
        } while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert(), this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler supports doing so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)                               \
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
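/*
 * A minimal usage sketch (illustrative, not part of this header): both
 * checks fail at build time, not at run time, whenever the compiler
 * can evaluate the condition.
 *
 *      compiletime_assert(sizeof(long) >= sizeof(void *),
 *                         "long must be able to hold a pointer");
 *      compiletime_assert_atomic_type(unsigned long);
 */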

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
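/*
 * A minimal usage sketch (illustrative, not part of this header): this
 * is how ARRAY_SIZE() in <linux/kernel.h> uses the check, so passing a
 * pointer instead of a real array is caught at compile time.
 *
 *      #define ARRAY_SIZE(arr) \
 *              (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */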

#endif /* __LINUX_COMPILER_H */
