
TOMOYO Linux Cross Reference
Linux/arch/tile/lib/atomic_32.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
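
/*
 * (On TILEPro, "hash-for-home" homes a page's cache lines across the
 * tiles' L2 caches rather than on a single tile, spreading contention
 * on this lock array across the chip.)
 */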

int *__atomic_hashed_lock(volatile void *v)
{
        /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
        /*
         * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
         * Using mm works here because atomic_locks is page aligned.
         */
        unsigned long ptr = __insn_mm((unsigned long)v >> 1,
                                      (unsigned long)atomic_locks,
                                      2, (ATOMIC_HASH_SHIFT + 2) - 1);
        return (int *)ptr;
}
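
/*
 * For reference, a plain-C sketch of the masked-merge ("mm") computation
 * above; this assumes the instruction keeps bits [2, ATOMIC_HASH_SHIFT + 1]
 * from the shifted address and takes the remaining bits from the
 * page-aligned atomic_locks base:
 *
 *      unsigned long mask = ((1UL << ATOMIC_HASH_SHIFT) - 1) << 2;
 *      ptr = ((unsigned long)atomic_locks & ~mask) |
 *            (((unsigned long)v >> 1) & mask);
 *
 * Bits [3, 3 + ATOMIC_HASH_SHIFT) of the original address thus become an
 * int-sized index into atomic_locks.
 */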

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
        return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

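/*
 * Release a hashed lock on the fault path: a lock word is 1 while held
 * and 0 when free (see the BUG_ON checks below).  The fault handler is
 * presumed to call this when an atomic routine faults while holding its
 * lock, so the fault can be serviced without deadlocking.
 */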
void __atomic_fault_unlock(int *irqlock_word)
{
        BUG_ON(!is_atomic_lock(irqlock_word));
        BUG_ON(*irqlock_word != 1);
        *irqlock_word = 0;
}

#endif /* CONFIG_SMP */

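/*
 * Touch the target word before taking its hashed lock, so that any
 * cache miss is absorbed before the critical section begins and the
 * lock is held as briefly as possible.
 */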
static inline int *__atomic_setup(volatile void *v)
{
        /* Issue a load to the target to bring it into cache. */
        *(volatile int *)v;
        return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
        return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
        return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

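/*
 * Semantics sketch: atomically do { old = *v; if (old != u) *v = old + a; }
 * and return old, i.e. add 'a' to *v unless *v is 'u'.
 */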
int _atomic_xchg_add_unless(int *v, int a, int u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
        return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
        return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);
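
/*
 * All of the __atomic32_*() helpers live in atomic_asm_32.S and share
 * one pattern, roughly (a sketch, not the literal assembly):
 *
 *      lock = the hashed lock passed in by __atomic_setup();
 *      spin until *lock can be changed from 0 to 1;
 *      old = *v;
 *      *v = <operation applied to old and the arguments>;
 *      *lock = 0;
 *      return old (as the .val field);
 *
 * The hashed lock serializes all atomic operations whose addresses map
 * to the same lock word, which is what makes the plain loads and stores
 * in the sketch safe.
 */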

long long _atomic64_xchg(long long *v, long long n)
{
        return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
        return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
        /*
         * Note: argument order is switched here since it is easier
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
        return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
        return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
        return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
        return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
        return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);
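
/*
 * The 64-bit routines hash on the value's address exactly like the
 * 32-bit ones.  Because __atomic_hashed_lock() ignores the low 3
 * address bits, an 8-byte-aligned long long and both of its 32-bit
 * halves map to the same lock word.
 */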

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel addresses, and the normal atomics
 * and bitops are never used on user addresses.  So a fault on a kernel
 * address must be fatal, but a fault on a user address is a futex
 * fault and we need to return -EFAULT.  Note that this routine runs
 * in the context of the "_atomic_xxx()" routines called by the
 * functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
        if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
                panic("Bad address used for kernel atomic op: %p\n", addr);
        return (struct __get_user) { .err = -EFAULT };
}
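
/*
 * Callers consume the returned struct __get_user the same way as the
 * .val results above: a zero .err means .val is valid, while the
 * -EFAULT set here is propagated back to the futex layer.
 */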

void __init __init_atomic_per_cpu(void)
{
        /* Validate power-of-two and "bigger than cpus" assumption */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
        BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

        /*
         * On TILEPro we prefer to use a single hash-for-home
         * page, since this means atomic operations are less
         * likely to encounter a TLB fault and thus should
         * in general perform faster.  You may wish to disable
         * this in situations where few hash-for-home tiles
         * are configured.
         */
        BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

        /* The locks must all fit on one page. */
        BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

        /*
         * We use the page offset of the atomic value's address as
         * an index into atomic_locks, excluding the low 3 bits.
         * That should not produce more indices than ATOMIC_HASH_SIZE.
         */
        BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
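
/*
 * Worked example with hypothetical numbers: with 64 KB pages and
 * ATOMIC_HASH_SHIFT = 13, ATOMIC_HASH_SIZE is 8192, the locks occupy
 * 8192 * sizeof(int) = 32 KB <= PAGE_SIZE, and PAGE_SIZE >> 3 = 8192
 * <= ATOMIC_HASH_SIZE, so both BUILD_BUG_ON checks above hold.
 */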
