TOMOYO Linux Cross Reference
Linux/arch/parisc/include/asm/atomic.h


/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
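
/*
 * Illustrative sketch (not part of the original header): how the hash
 * spreads locks. With ATOMIC_HASH_SIZE == 4 and an assumed L1_CACHE_BYTES
 * of 64, ATOMIC_HASH() selects one of four locks from the cache-line
 * index of the address, so atomic_ts sharing a cache line always share
 * a lock:
 *
 *	atomic_t *a = (atomic_t *) 0x1040;	// hypothetical address
 *	ATOMIC_HASH(a) == &__atomic_hash[(0x1040 / 64) & 3]
 *	               == &__atomic_hash[65 & 3]
 *	               == &__atomic_hash[1]
 */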

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
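
/*
 * Illustrative sketch (not part of the original header): a typical use
 * of the cmpxchg loop above is "take a reference only while the count is
 * still live" -- the add is skipped once the counter has already reached
 * zero. obj and refcount are hypothetical names for illustration:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0) {
 *		// old value was non-zero, so we now hold a reference;
 *		// drop it later with atomic_dec(&obj->refcount)
 *	}
 */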


#define atomic_add(i,v)	((void)(__atomic_add_return(        (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
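
/*
 * Illustrative sketch (not part of the original header):
 * atomic64_dec_if_positive behaves like a semaphore-style "consume one
 * unit if any remain". Because it returns the old value minus 1 even
 * when no decrement happened, a negative result means the counter was
 * already zero (or negative). pool->available is a hypothetical counter
 * for illustration:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing left to take
 */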

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */
