Linux/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h

/*
 * SMP/VPE-safe functions to access "registers" (see note).
 *
 * NOTES:
 * - These macros use ll/sc instructions, so it is your responsibility to
 * ensure these are available on your platform before including this file.
 * - The MIPS32 spec states that ll/sc results are undefined for uncached
 * accesses. This means they can't be used on HW registers accessed
 * through kseg1. Code which requires these macros for this purpose must
 * front-end the registers with cached memory "registers" and have a single
 * thread update the actual HW registers (see the illustrative sketch
 * following this comment).
 * - A maximum of 2k of code can be inserted between ll and sc. Every
 * memory access between the instructions will increase the chance of
 * sc failing and having to loop.
 * - When using custom_read_reg32/custom_write_reg32 only perform the
 * necessary logical operations on the register value in between these
 * two calls. All other logic should be performed before the first call.
 * - There is a bug on the R10000 chips which has a workaround. If you
 * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
 * to be non-zero.  If you are using this header from within linux, you may
 * include <asm/war.h> before including this file to have this defined
 * appropriately for you.
 *
 * Copyright 2005-2007 PMC-Sierra, Inc.
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
 *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc., 675
 *  Mass Ave, Cambridge, MA 02139, USA.
 */
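
/*
 * Illustrative sketch only (not part of this header): one way to front-end
 * a kseg1 HW register with a cached shadow "register", as described in the
 * note above.  The shadow variable, the register address and the mask below
 * are made up for the example.
 *
 *   static u32 shadow_ctrl;                            // cached "register"
 *   #define HW_CTRL ((volatile u32 *)0xb0000000)       // hypothetical kseg1 address
 *
 *   // Any CPU/VPE may update the cached shadow atomically ...
 *   set_value_reg32(&shadow_ctrl, 0x000000f0, 0x3 << 4);
 *
 *   // ... while a single designated thread mirrors it to the real register.
 *   *HW_CTRL = shadow_ctrl;
 */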

#ifndef __ASM_REGOPS_H__
#define __ASM_REGOPS_H__

#include <linux/types.h>

#include <asm/compiler.h>
#include <asm/war.h>

#ifndef R10000_LLSC_WAR
#define R10000_LLSC_WAR 0
#endif

#if R10000_LLSC_WAR == 1
#define __beqz  "beqzl  "
#else
#define __beqz  "beqz   "
#endif

#ifndef _LINUX_TYPES_H
typedef unsigned int u32;
#endif

/*
 * Sets all the masked bits to the corresponding value bits
 */
static inline void set_value_reg32(volatile u32 *const addr,
                                        u32 const mask,
                                        u32 const value)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1  # set_value_reg32       \n"
        "       and     %0, %2                          \n"
        "       or      %0, %3                          \n"
        "       sc      %0, %1                          \n"
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
        : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
        : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
}
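
/*
 * Example (illustrative; 'status' and its field layout are made up):
 * atomically replace bits [7:4] of a cached status word with the value 0x5,
 * leaving all other bits untouched:
 *
 *   static u32 status;
 *
 *   set_value_reg32(&status, 0x000000f0, 0x5 << 4);
 */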

/*
 * Sets all the masked bits to '1'
 */
static inline void set_reg32(volatile u32 *const addr,
                                u32 const mask)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # set_reg32     \n"
        "       or      %0, %2                          \n"
        "       sc      %0, %1                          \n"
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
        : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
        : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}

/*
 * Sets all the masked bits to '0'
 */
static inline void clear_reg32(volatile u32 *const addr,
                                u32 const mask)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # clear_reg32   \n"
        "       and     %0, %2                          \n"
        "       sc      %0, %1                          \n"
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
        : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
        : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
}

/*
 * Toggles all masked bits from '0' to '1' and '1' to '0'
 */
static inline void toggle_reg32(volatile u32 *const addr,
                                u32 const mask)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    arch=r4000                      \n"
        "1:     ll      %0, %1          # toggle_reg32  \n"
        "       xor     %0, %2                          \n"
        "       sc      %0, %1                          \n"
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
        : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
        : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}
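
/*
 * Example (illustrative; 'flags' and the bit assignments are made up):
 *
 *   static u32 flags;
 *
 *   set_reg32(&flags, 0x01);     // bit 0 -> '1', atomically
 *   clear_reg32(&flags, 0x02);   // bit 1 -> '0', atomically
 *   toggle_reg32(&flags, 0x04);  // bit 2 inverted, atomically
 */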

/*
 * Read all masked bits; others are returned as '0'
 */
static inline u32 read_reg32(volatile u32 *const addr,
                                u32 const mask)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    noreorder                       \n"
        "       lw      %0, %1          # read          \n"
        "       and     %0, %2          # mask          \n"
        "       .set    pop                             \n"
        : "=&r" (temp)
        : "m" (*addr), "ir" (mask));

        return temp;
}

/*
 * blocking_read_reg32 - Read address with blocking load
 *
 * Uncached writes need to be read back to ensure they reach RAM.
 * The returned value must be 'used' to prevent it from becoming a
 * non-blocking load.
 */
static inline u32 blocking_read_reg32(volatile u32 *const addr)
{
        u32 temp;

        __asm__ __volatile__(
        "       .set    push                            \n"
        "       .set    noreorder                       \n"
        "       lw      %0, %1          # read          \n"
        "       move    %0, %0          # block         \n"
        "       .set    pop                             \n"
        : "=&r" (temp)
        : "m" (*addr));

        return temp;
}
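
/*
 * Example (illustrative; the register address is hypothetical): push an
 * uncached write out and make sure it has reached the device before
 * continuing.  The returned value is consumed by the comparison, so the
 * load is actually 'used' as required above.
 *
 *   volatile u32 *reg = (volatile u32 *)0xb0000100;
 *
 *   *reg = 0x1;
 *   if (blocking_read_reg32(reg) != 0x1)
 *           ; // handle unexpected readback
 */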

/*
 * For special strange cases only:
 *
 * If you need custom processing within a ll/sc loop, use the following macros
 * VERY CAREFULLY:
 *
 *   u32 tmp;                           <-- Define a variable to hold the data
 *
 *   custom_read_reg32(address, tmp);   <-- Reads the address and puts the value
 *                                              in the 'tmp' variable given
 *
 *      From here on out, you are (basically) atomic, so don't do anything too
 *      fancy!
 *      Also, this code may loop if the end of this block fails to write
 *      everything back safely due to the other CPU, so do NOT do anything
 *      with side-effects!
 *
 *   custom_write_reg32(address, tmp);  <-- Writes back 'tmp' safely.
 */
#define custom_read_reg32(address, tmp)                         \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    arch=r4000                      \n"     \
        "1:     ll      %0, %1  #custom_read_reg32      \n"     \
        "       .set    pop                             \n"     \
        : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)                \
        : GCC_OFF_SMALL_ASM() (*address))

#define custom_write_reg32(address, tmp)                        \
        __asm__ __volatile__(                                   \
        "       .set    push                            \n"     \
        "       .set    arch=r4000                      \n"     \
        "       sc      %0, %1  #custom_write_reg32     \n"     \
        "       "__beqz"%0, 1b                          \n"     \
        "       nop                                     \n"     \
        "       .set    pop                             \n"     \
        : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)               \
        : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
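
/*
 * Example (illustrative; 'counter' is made up): a saturating increment done
 * atomically with the custom_*_reg32 pair.  Only simple arithmetic is
 * performed between the two calls, as required by the rules above.
 *
 *   static u32 counter;
 *   u32 tmp;
 *
 *   custom_read_reg32(&counter, tmp);
 *   if (tmp != 0xffffffff)
 *           tmp++;
 *   custom_write_reg32(&counter, tmp);
 */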

#endif  /* __ASM_REGOPS_H__ */