Linux/arch/parisc/include/asm/spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

/* On PA-RISC the lock word is 1 when free and 0 when held (ldcw
 * atomically loads a word and clears it), so a zero word means the
 * lock is taken. */
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}
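
For context, a hedged sketch of the two helpers this header leans on. They live in asm/ldcw.h, not here; the versions below approximate that era's tree and are illustrative rather than verbatim. ldcw ("load and clear word") is PA-RISC's only atomic read-modify-write primitive, and on PA-RISC 1.x it requires a 16-byte-aligned operand, which is why arch_spinlock_t reserves several lock words and __ldcw_align() rounds up to the aligned one:

/* Illustrative approximation of asm/ldcw.h (not verbatim). */
#define __PA_LDCW_ALIGNMENT     16

/* Round the embedded lock-word array up to a 16-byte boundary. */
#define __ldcw_align(a) ({                                      \
        unsigned long __ret = (unsigned long) &(a)->lock[0];    \
        __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
                & ~(__PA_LDCW_ALIGNMENT - 1);                   \
        (volatile unsigned int *) __ret;                        \
})

/* Atomically load *a and store 0 to it; returns the old value,
 * so a non-zero result means we just acquired the lock. */
#define __ldcw(a) ({                                            \
        unsigned int __ret;                                     \
        __asm__ __volatile__("ldcw 0(%1),%0"                    \
                : "=r" (__ret) : "r" (a) : "memory");           \
        __ret;                                                  \
})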

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

/* Spin with plain loads until the lock word reads non-zero (free),
 * then provide acquire ordering against the caller's next access. */
static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);

        smp_cond_load_acquire(a, VAL);
}
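
smp_cond_load_acquire(ptr, cond_expr) repeatedly loads *ptr, binding each loaded value to VAL, until cond_expr is true, then orders the load against later accesses; passing bare VAL as the condition simply waits for a non-zero word. A hedged sketch of the generic fallback, modelled on asm-generic/barrier.h from roughly this kernel era (illustrative, not verbatim):

/* Illustrative sketch of the generic smp_cond_load_acquire(). */
#define smp_cond_load_acquire(ptr, cond_expr) ({       \
        typeof(ptr) __PTR = (ptr);                     \
        typeof(*ptr) VAL;                              \
        for (;;) {                                     \
                VAL = READ_ONCE(*__PTR);               \
                if (cond_expr)                         \
                        break;                         \
                cpu_relax();                           \
        }                                              \
        /* acquire: later accesses stay after this */  \
        smp_acquire__after_ctrl_dep();                 \
        VAL;                                           \
})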

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                        unsigned long flags)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        /* The outer loop makes the atomic ldcw attempt; the inner
         * loop spins with cheap plain loads until the word looks
         * free, re-enabling interrupts while waiting if the caller's
         * saved PSW had them enabled (PSW_SM_I). */
        while (__ldcw(a) == 0)
                while (*a == 0)
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
        mb();
}
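
A hedged sketch of the intended caller pattern (the function name is hypothetical; arch_read_lock() below is the in-tree equivalent): the caller saves and disables interrupts, then hands the saved PSW to the slow path so interrupts can still be serviced while it spins:

/* Hypothetical caller, for illustration only. */
static inline void example_lock_irqsave(arch_spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);             /* disable IRQs, remember PSW */
        arch_spin_lock_flags(lock, flags); /* may briefly re-enable IRQs
                                            * while it spins */
        /* ... critical section with IRQs off ... */
        arch_spin_unlock(lock);
        local_irq_restore(flags);
}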

/* Release: store 1 (free) back into the aligned lock word; the
 * barriers order the critical section against the store. */
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        mb();
        a = __ldcw_align(x);
        *a = 1;
        mb();
}

/* One atomic ldcw attempt, no spinning: a non-zero result means the
 * word was 1 (free) and we have just taken the lock. */
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        mb();
        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;
        mb();

        return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
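
A hedged sketch of the type this scheme implies (the real definition lives in asm/spinlock_types.h; this matches the description above but is illustrative, not verbatim):

/* Illustrative approximation of arch_rwlock_t:
 *   counter >  0  held by that many readers
 *   counter == 0  free
 *   counter == -1 held by one writer
 * Every change to counter is made under the embedded spinlock. */
typedef struct {
        arch_spinlock_t lock;
        volatile int counter;
} arch_rwlock_t;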

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter++;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter--;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                rw->counter++;
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}
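
One consequence worth illustrating: arch_read_trylock() only fails when a writer actually holds the lock; transient contention on the inner spinlock makes it wait and retry instead. A hedged, hypothetical caller:

/* Hypothetical caller, for illustration only: a failed trylock
 * reliably means "write-locked right now", not "briefly contended". */
static inline int example_read_if_possible(arch_rwlock_t *rw)
{
        if (!arch_read_trylock(rw))
                return 0;       /* a writer holds the lock */
        /* ... read-side critical section ... */
        arch_read_unlock(rw);
        return 1;
}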

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);

        if (rw->counter != 0) {
                /* Readers still hold the lock: drop the spinlock (so
                 * they can release) and wait for them to drain. */
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
}

/* Clear the writer mark, then release the spinlock that both readers
 * and writers contend on. */
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->counter = 0;
        arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        arch_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

/*
 * arch_read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
        return rw->counter >= 0;
}

/*
 * arch_write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
        return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SPINLOCK_H */
