~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/bitops.h

Version: ~ [ linux-5.6 ] ~ [ linux-5.5.13 ] ~ [ linux-5.4.28 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.113 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.174 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.217 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.217 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.82 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef	__KERNEL__
/* Single-bit mask; only valid for nr < BITS_PER_LONG. */
#define BIT(nr)			(1UL << (nr))
/* Mask for bit nr within its containing unsigned long of a bitmap. */
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
/* Index of the unsigned long word that holds bit nr of a bitmap. */
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE		8
/*
 * Number of unsigned longs needed to store nr bits.
 * NOTE(review): DIV_ROUND_UP is not defined in this header — presumably
 * provided by linux/kernel.h in all includers; verify before reuse.
 */
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
 12 
/*
 * Software population-count ("Hamming weight") routines, implemented
 * out of line elsewhere in the tree; architectures without a hardware
 * popcount fall back to these.
 */
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
 17 
 18 /*
 19  * Include this here because some architectures need generic_ffs/fls in
 20  * scope
 21  */
 22 #include <asm/bitops.h>
 23 
/*
 * for_each_set_bit - iterate over every set bit in a bitmap
 * @bit:  iteration variable, receives each set bit's index in turn
 * @addr: start address of the bitmap
 * @size: bitmap size in bits
 */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* for_each_clear_bit - like for_each_set_bit(), but visits clear bits */
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 45 
 46 static __inline__ int get_bitmask_order(unsigned int count)
 47 {
 48         int order;
 49 
 50         order = fls(count);
 51         return order;   /* We could be slightly more clever with -1 here... */
 52 }
 53 
 54 static __inline__ int get_count_order(unsigned int count)
 55 {
 56         int order;
 57 
 58         order = fls(count) - 1;
 59         if (count & (count - 1))
 60                 order++;
 61         return order;
 62 }
 63 
 64 static inline unsigned long hweight_long(unsigned long w)
 65 {
 66         return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 67 }
 68 
 69 /**
 70  * rol64 - rotate a 64-bit value left
 71  * @word: value to rotate
 72  * @shift: bits to roll
 73  */
 74 static inline __u64 rol64(__u64 word, unsigned int shift)
 75 {
 76         return (word << shift) | (word >> (64 - shift));
 77 }
 78 
 79 /**
 80  * ror64 - rotate a 64-bit value right
 81  * @word: value to rotate
 82  * @shift: bits to roll
 83  */
 84 static inline __u64 ror64(__u64 word, unsigned int shift)
 85 {
 86         return (word >> shift) | (word << (64 - shift));
 87 }
 88 
 89 /**
 90  * rol32 - rotate a 32-bit value left
 91  * @word: value to rotate
 92  * @shift: bits to roll
 93  */
 94 static inline __u32 rol32(__u32 word, unsigned int shift)
 95 {
 96         return (word << shift) | (word >> (32 - shift));
 97 }
 98 
 99 /**
100  * ror32 - rotate a 32-bit value right
101  * @word: value to rotate
102  * @shift: bits to roll
103  */
104 static inline __u32 ror32(__u32 word, unsigned int shift)
105 {
106         return (word >> shift) | (word << (32 - shift));
107 }
108 
109 /**
110  * rol16 - rotate a 16-bit value left
111  * @word: value to rotate
112  * @shift: bits to roll
113  */
114 static inline __u16 rol16(__u16 word, unsigned int shift)
115 {
116         return (word << shift) | (word >> (16 - shift));
117 }
118 
119 /**
120  * ror16 - rotate a 16-bit value right
121  * @word: value to rotate
122  * @shift: bits to roll
123  */
124 static inline __u16 ror16(__u16 word, unsigned int shift)
125 {
126         return (word >> shift) | (word << (16 - shift));
127 }
128 
129 /**
130  * rol8 - rotate an 8-bit value left
131  * @word: value to rotate
132  * @shift: bits to roll
133  */
134 static inline __u8 rol8(__u8 word, unsigned int shift)
135 {
136         return (word << shift) | (word >> (8 - shift));
137 }
138 
139 /**
140  * ror8 - rotate an 8-bit value right
141  * @word: value to rotate
142  * @shift: bits to roll
143  */
144 static inline __u8 ror8(__u8 word, unsigned int shift)
145 {
146         return (word >> shift) | (word << (8 - shift));
147 }
148 
149 /**
150  * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
151  * @value: value to sign extend
152  * @index: 0 based bit index (0<=index<32) to sign bit
153  */
154 static inline __s32 sign_extend32(__u32 value, int index)
155 {
156         __u8 shift = 31 - index;
157         return (__s32)(value << shift) >> shift;
158 }
159 
/*
 * fls_long - find last (most significant) set bit in an unsigned long
 * @l: value to examine
 *
 * Compile-time dispatch on the width of long; the untaken branch is
 * eliminated by the compiler.
 */
static inline unsigned fls_long(unsigned long l)
{
	return (sizeof(l) == 4) ? fls(l) : fls64(l);
}
166 
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* low word empty: search the high 32 bits and bias the result */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	/* 64-bit, or 32-bit with a set bit in the low word */
	return __ffs((unsigned long)word);
}
185 
#ifdef __KERNEL__

#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or size if no bits are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
201 #endif
202 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp