Linux/arch/mips/include/asm/bitops.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>              /* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
                            volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
                                 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
                              volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
                               volatile unsigned long *addr);


/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        int bit = nr & SZLONG_MASK;
        unsigned long temp;

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1                  # set_bit       \n"
                "       or      %0, %2                                  \n"
                "       " __SC  "%0, %1                                 \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       " __INS "%0, %3, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit), "r" (~0));
                } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
                __mips_set_bit(nr, addr);
}
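
/*
 * Usage sketch (added for illustration, not part of the original header):
 * set_bit() is the atomic variant; __set_bit() from
 * <asm-generic/bitops/non-atomic.h> may be used instead when the word is not
 * shared, e.g. during initialisation.  The names below are hypothetical.
 *
 *	set_bit(DEV_FLAG_READY, &dev->flags);	safe against concurrent bit ops
 *	__set_bit(DEV_FLAG_READY, &dev->flags);	cheaper, caller must exclude races
 */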

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        int bit = nr & SZLONG_MASK;
        unsigned long temp;

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1                  # clear_bit     \n"
                "       and     %0, %2                                  \n"
                "       " __SC "%0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       " __INS "%0, $0, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit));
                } while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
        } else
                __mips_clear_bit(nr, addr);
}
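
/*
 * Usage sketch (added for illustration, not part of the original header):
 * when clear_bit() is used to pass ownership to another CPU, pair it with
 * the barriers named in the comment above.  The names are hypothetical.
 *
 *	smp_mb__before_atomic();		order earlier stores first
 *	clear_bit(DEV_FLAG_BUSY, &dev->flags);	atomic, but no barrier itself
 */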

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        smp_mb__before_atomic();
        clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
        int bit = nr & SZLONG_MASK;

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    arch=r4000                      \n"
                "1:     " __LL "%0, %1          # change_bit    \n"
                "       xor     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (1UL << bit));
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
                __mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        int bit = nr & SZLONG_MASK;
        unsigned long res;

        smp_mb__before_llsc();

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));

                res = temp & (1UL << bit);
        } else
                res = __mips_test_and_set_bit(nr, addr);

        smp_llsc_mb();

        return res != 0;
}
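
/*
 * Illustrative sketch only, not part of the original header: the returned
 * old value lets callers claim one-shot work exactly once even when several
 * CPUs race here.  The example_* name and bit index are hypothetical.
 */
static inline int example_claim_once(volatile unsigned long *flags)
{
        /* returns 1 for exactly one caller: the one that saw the bit clear */
        return !test_and_set_bit(0, flags);
}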

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
        volatile unsigned long *addr)
{
        int bit = nr & SZLONG_MASK;
        unsigned long res;

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+m" (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));

                res = temp & (1UL << bit);
        } else
                res = __mips_test_and_set_bit_lock(nr, addr);

        smp_llsc_mb();

        return res != 0;
}
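
/*
 * Illustrative sketch only, not part of the original header: a minimal bit
 * lock built from test_and_set_bit_lock() (acquire) and clear_bit_unlock()
 * (release).  The example_* names and the bit index are hypothetical.
 */
static inline int example_bit_trylock(volatile unsigned long *word)
{
        /* a non-zero old value means somebody else already holds the lock */
        return !test_and_set_bit_lock(0, word);
}

static inline void example_bit_unlock(volatile unsigned long *word)
{
        /* release: stores before this call are visible before the bit clears */
        clear_bit_unlock(0, word);
}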

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        int bit = nr & SZLONG_MASK;
        unsigned long res;

        smp_mb__before_llsc();

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL  "%0, %1         # test_and_clear_bit    \n"
                "       or      %2, %0, %3                              \n"
                "       xor     %2, %3                                  \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       " __EXT "%2, %0, %3, 1                  \n"
                        "       " __INS "%0, $0, %3, 1                  \n"
                        "       " __SC  "%0, %1                         \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "ir" (bit)
                        : "memory");
                } while (unlikely(!temp));
#endif
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));

                res = temp & (1UL << bit);
        } else
                res = __mips_test_and_clear_bit(nr, addr);

        smp_llsc_mb();

        return res != 0;
}
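
/*
 * Usage sketch (added for illustration, not part of the original header):
 * test_and_clear_bit() lets exactly one CPU consume a pending-work flag.
 * The names below are hypothetical.
 *
 *	if (test_and_clear_bit(WORK_PENDING_BIT, &work_flags))
 *		handle_work();		only the caller that saw the bit set runs this
 */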

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        int bit = nr & SZLONG_MASK;
        unsigned long res;

        smp_mb__before_llsc();

        if (kernel_uses_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    arch=r4000                              \n"
                "1:     " __LL  "%0, %1         # test_and_change_bit   \n"
                "       xor     %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                do {
                        __asm__ __volatile__(
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
                        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));

                res = temp & (1UL << bit);
        } else
                res = __mips_test_and_change_bit(nr, addr);

        smp_llsc_mb();

        return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic; the barrier executed before the store
 * gives it release semantics. It can be used for an unlock if no other CPUs
 * can concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
        smp_mb__before_llsc();
        __clear_bit(nr, addr);
        nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
        int num;

        if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
                : "r" (word));

                return 31 - num;
        }

        if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
            __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
                __asm__(
                "       .set    push                                    \n"
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       dclz    %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
                : "r" (word));

                return 63 - num;
        }

        num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
        if (!(word & (~0ul << 32))) {
                num -= 32;
                word <<= 32;
        }
#endif
        if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
                num -= 16;
                word <<= 16;
        }
        if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
                num -= 8;
                word <<= 8;
        }
        if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
                num -= 4;
                word <<= 4;
        }
        if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
                num -= 2;
                word <<= 2;
        }
        if (!(word & (~0ul << (BITS_PER_LONG-1))))
                num -= 1;
        return num;
}
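
/*
 * Worked example (added for illustration, not part of the original header):
 * for word = 0x90 (binary 1001 0000) the most significant set bit is bit 7,
 * so __fls(0x90) == 7.  The fallback path narrows the position by halving:
 * 16, 8, 4, 2 and 1 bit steps on a 32-bit word.
 */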

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        return __fls(word & -word);
}
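
/*
 * Worked example (added for illustration, not part of the original header):
 * word & -word isolates the lowest set bit, e.g. 0x90 & -0x90 == 0x10, and
 * __fls() of that single-bit value is its position, so __ffs(0x90) == 4.
 */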

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
        int r;

        if (!__builtin_constant_p(x) &&
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (x)
                : "r" (x));

                return 32 - x;
        }

        r = 32;
        if (!x)
                return 0;
        if (!(x & 0xffff0000u)) {
                x <<= 16;
                r -= 16;
        }
        if (!(x & 0xff000000u)) {
                x <<= 8;
                r -= 8;
        }
        if (!(x & 0xf0000000u)) {
                x <<= 4;
                r -= 4;
        }
        if (!(x & 0xc0000000u)) {
                x <<= 2;
                r -= 2;
        }
        if (!(x & 0x80000000u)) {
                x <<= 1;
                r -= 1;
        }
        return r;
}
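
/*
 * Usage sketch (added for illustration, not part of the original header):
 * fls() gives the number of bits needed to represent a value, so for n >= 1
 * the smallest power of two >= n can be computed as
 *
 *	order = fls(n - 1);		e.g. n = 5: fls(4) == 3
 *	size  = 1UL << order;		1UL << 3 == 8 >= 5
 */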

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
        if (!word)
                return 0;

        return fls(word & -word);
}
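
/*
 * Worked example (added for illustration, not part of the original header):
 * ffs() is 1-based where __ffs() is 0-based, so for non-zero x
 * ffs(x) == __ffs(x) + 1; e.g. ffs(0x90) == 5 while __ffs(0x90) == 4,
 * and ffs(0) == 0 by definition.
 */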

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
