TOMOYO Linux Cross Reference
Linux/arch/ia64/include/asm/bitops.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
 * O(1) scheduler patch
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations to
 * operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
}
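
/*
 * Illustrative usage sketch (added annotation, not part of the original
 * header; the function and parameter names are hypothetical): because the
 * cmpxchg_acq() loop above simply retries whenever another CPU modified
 * the word between the load and the compare-exchange, set_bit() needs no
 * external locking.
 */
static __inline__ void
example_mark_ready (int nr, volatile unsigned long *flags)
{
        set_bit(nr, flags);     /* atomic read-modify-write, safe from any CPU */
}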

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
}
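
/*
 * Usage sketch (added annotation; names are hypothetical): when clear_bit()
 * releases a flag that other CPUs test, the kerneldoc above asks for an
 * explicit barrier, since the cmpxchg here has only acquire semantics:
 */
static __inline__ void
example_release_flag (int nr, volatile unsigned long *flags)
{
        smp_mb__before_atomic();        /* order earlier stores before the clear */
        clear_bit(nr, flags);
}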

/**
 * clear_bit_unlock - Clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and may not be reordered.  It does
 * contain a memory barrier suitable for unlock type operations.
 */
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_rel(m, old, new) != old);
}

/**
 * __clear_bit_unlock - Non-atomically clears a bit in memory with release
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Similarly to clear_bit_unlock, the implementation uses a store
 * with release semantics. See also arch_spin_unlock().
 */
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
        __u32 * const m = (__u32 *) addr + (nr >> 5);
        __u32 const new = *m & ~(1 << (nr & 31));

        ia64_st4_rel_nta(m, new);
}

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
        *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = 1 << (nr & 31);
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old | bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on ia64
 */
#define test_and_set_bit_lock test_and_set_bit

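/*
 * Sketch of a bit lock built from the pair above (added annotation; this
 * is not the kernel's real bit spinlock, which lives in
 * <linux/bit_spinlock.h>): test_and_set_bit_lock() provides the acquire
 * half, clear_bit_unlock() the release half.
 */
static __inline__ void
example_bit_lock (int nr, volatile unsigned long *word)
{
        while (test_and_set_bit_lock(nr, word))
                ;       /* spin: the bit was already held by someone else */
}

static __inline__ void
example_bit_unlock (int nr, volatile unsigned long *word)
{
        clear_bit_unlock(nr, word);     /* release: prior stores visible first */
}
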
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p |= m;
        return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
        __u32 mask, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        mask = ~(1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old & mask;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 *p = (__u32 *) addr + (nr >> 5);
        __u32 m = 1 << (nr & 31);
        int oldbitset = (*p & m) != 0;

        *p &= ~m;
        return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies the acquisition side of the memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
        __u32 bit, old, new;
        volatile __u32 *m;
        CMPXCHG_BUGCHECK_DECL

        m = (volatile __u32 *) addr + (nr >> 5);
        bit = (1 << (nr & 31));
        do {
                CMPXCHG_BUGCHECK(m);
                old = *m;
                new = old ^ bit;
        } while (cmpxchg_acq(m, old, new) != old);
        return (old & bit) != 0;
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
        __u32 old, bit = (1 << (nr & 31));
        __u32 *m = (__u32 *) addr + (nr >> 5);

        old = *m;
        *m = old ^ bit;
        return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
        return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
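
/*
 * Usage sketch (added annotation; names are hypothetical): test_bit() is a
 * plain volatile read, so a poll loop like this relies on the writer using
 * an atomic operation such as set_bit():
 */
static __inline__ void
example_wait_for_flag (int nr, const volatile unsigned long *flags)
{
        while (!test_bit(nr, flags))
                ;       /* spin until another CPU sets the bit */
}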

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt(x & (~x - 1));
        return result;
}

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
        unsigned long result;

        result = ia64_popcnt((x-1) & ~x);
        return result;
}
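
/*
 * Worked example (added annotation): take x = 0xb (binary 1011; lowest
 * zero at bit 2).  Then ~x = ...11110100, ~x - 1 = ...11110011, and
 * x & (~x - 1) = 0011: exactly the run of one-bits below the lowest zero.
 * Its population count is 2, so ffz(0xb) == 2.  __ffs() mirrors this:
 * for x = 0xc (binary 1100), (x - 1) & ~x = 1011 & ...11110011 = 0011,
 * giving popcnt = 2, so __ffs(0xc) == 2.
 */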

#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
        long double d = x;
        long exp;

        exp = ia64_getf_exp(d);
        return exp - 0xffff;
}
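
/*
 * How the trick works (added annotation): converting x to long double
 * normalizes it to 1.f * 2^e with e = floor(log2(x)), and getf.exp reads
 * back the biased exponent e + 0xffff.  For ia64_fls(9): 9.0 = 1.001b *
 * 2^3, the biased exponent is 0x10002, and 0x10002 - 0xffff = 3.  A
 * portable reference to check against (hypothetical helper, added for
 * illustration):
 */
static inline unsigned long
example_fls_reference (unsigned long x)
{
        unsigned long n = 0;

        while (x >>= 1)         /* count shifts until only the MSB remains */
                n++;
        return n;               /* equals ia64_fls(x) for any x != 0 */
}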

/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
        unsigned long x = t & 0xffffffffu;

        if (!x)
                return 0;
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        return ia64_popcnt(x);
}

/*
 * Find the last (most significant) bit set.  Undefined for x==0.
 * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
 */
static inline unsigned long
__fls (unsigned long x)
{
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        x |= x >> 32;
        return ia64_popcnt(x) - 1;
}
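
/*
 * Worked example (added annotation): the shift-or cascade smears the most
 * significant set bit into every lower position, so x becomes a mask of
 * fls(x) one-bits.  For fls(9): 1001b smears to 1111b and popcnt(1111b)
 * = 4.  __fls() uses the same cascade with the extra "x |= x >> 32" step
 * for 64-bit words and subtracts 1 for 0-based numbering: __fls(9) == 3.
 */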

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
        unsigned long result;
        result = ia64_popcnt(x);
        return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))
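
/*
 * Example (added annotation): since ia64 has a native popcnt instruction,
 * every hweight width reduces to a single ia64_popcnt() on a masked
 * value, e.g. __arch_hweight16(0xf0f0) == 8.
 */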

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */
