TOMOYO Linux Cross Reference
Linux/arch/cris/include/asm/uaccess.h

/*
 * Authors:    Bjorn Wesen (bjornw@axis.com)
 *             Hans-Peter Nilsson (hp@axis.com)
 */

/* The asm statements have been tweaked (within the domain of correctness) to
   give satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Check regularly...

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */

#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task.  We misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr, size) \
        (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

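/*
 * Illustrative sketch (not part of the original header): the historical
 * pattern the comments above describe, temporarily widening addr_limit so
 * that the user-access helpers accept kernel addresses, then restoring it.
 * The helper names are hypothetical; the actual access is elided because
 * the copy routines are only declared further down in this header.
 */
static inline mm_segment_t __example_enter_kernel_ds(void)
{
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* __kernel_ok is now true: checks pass */
        return old_fs;          /* caller hands this back to the helper below */
}

static inline void __example_exit_kernel_ds(mm_segment_t old_fs)
{
        set_fs(old_fs);         /* restore the caller's addr_limit */
}
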
#include <arch/uaccess.h>

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

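/*
 * Illustrative sketch (not part of the original header): conceptually, the
 * page-fault handler resolves a faulting user access by looking up the
 * faulting instruction address in a table of these entries and resuming at
 * the corresponding fixup address.  The linear scan below is only a model;
 * the real kernel keeps the table sorted and searches it via
 * search_exception_tables().
 */
static inline unsigned long
__example_find_fixup(const struct exception_table_entry *start,
                     const struct exception_table_entry *end,
                     unsigned long faulting_insn)
{
        const struct exception_table_entry *e;

        for (e = start; e < end; e++)
                if (e->insn == faulting_insn)
                        return e->fixup;        /* continue execution here */
        return 0;                               /* no fixup: a genuine fault */
}
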
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "move.b");               \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "move.w");               \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "move.d");               \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_64(x, ptr, retval);                      \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        long __pu_err;                                  \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) *__pu_addr = (ptr);                          \
        if (access_ok(VERIFY_WRITE, __pu_addr, size))                   \
                __put_user_size((x), __pu_addr, (size), __pu_err);      \
        __pu_err;                                                       \
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err, __gu_val;                                \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force __typeof__(*(ptr)))__gu_val;             \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT, __gu_val = 0;                          \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
        if (access_ok(VERIFY_READ, __gu_addr, size))                    \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
})

extern long __get_user_bad(void);

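/*
 * Illustrative sketch (not part of the original header): get_user() and
 * put_user() perform the access_ok() check themselves and evaluate to 0 on
 * success or -EFAULT on a faulting access, while the __ variants rely on
 * the caller having validated the whole area with one access_ok() call.
 * The function below is hypothetical.
 */
static inline int __example_read_pair(int *a, int *b, const int __user *uptr)
{
        if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
                return -EFAULT;
        /* Range validated once above, so the unchecked variants suffice. */
        if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
                return -EFAULT;
        return 0;
}
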
/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);

static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
        return __do_strncpy_from_user(dst, src, count);
}

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res = -EFAULT;

        if (access_ok(VERIFY_READ, src, 1))
                res = __do_strncpy_from_user(dst, src, count);
        return res;
}

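/*
 * Illustrative sketch (not part of the original header): typical use of
 * strncpy_from_user() to pull a NUL-terminated name out of user space.  A
 * negative return means the source faulted; a return equal to the buffer
 * size means the string did not fit (and is not NUL-terminated).  The
 * function and the 32-byte buffer size are hypothetical.
 */
static inline long __example_get_name(char *buf, const char __user *uname)
{
        long len = strncpy_from_user(buf, uname, 32);   /* buf holds >= 32 bytes */

        if (len < 0)
                return len;             /* -EFAULT */
        if (len >= 32)
                return -ENAMETOOLONG;   /* truncated, no terminating NUL */
        return len;                     /* length excluding the NUL */
}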

/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */

static inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_from_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_from_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_from_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_from_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_from_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_from_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_from_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_from_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_from_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_from_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_from_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_from_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_from_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_from_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_from_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_from_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_from_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
                ret = __copy_user_zeroing(to, from, n);

        return ret;
}

/* Ditto, don't make a switch out of this.  */

static inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_to_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_to_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_to_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_to_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_to_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_to_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_to_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_to_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_to_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_to_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_to_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_to_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_to_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_to_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_to_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_to_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_to_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
                ret = __copy_user(to, from, n);

        return ret;
}

/* No switch, please.  */

static inline unsigned long
__constant_clear_user(void __user *to, unsigned long n)
{
        unsigned long ret = 0;

        if (n == 0)
                ;
        else if (n == 1)
                __asm_clear_1(to, ret);
        else if (n == 2)
                __asm_clear_2(to, ret);
        else if (n == 3)
                __asm_clear_3(to, ret);
        else if (n == 4)
                __asm_clear_4(to, ret);
        else if (n == 8)
                __asm_clear_8(to, ret);
        else if (n == 12)
                __asm_clear_12(to, ret);
        else if (n == 16)
                __asm_clear_16(to, ret);
        else if (n == 20)
                __asm_clear_20(to, ret);
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
                ret = __do_clear_user(to, n);

        return ret;
}

static inline size_t clear_user(void __user *to, size_t n)
{
        if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
                return n;
        if (__builtin_constant_p(n))
                return __constant_clear_user(to, n);
        else
                return __do_clear_user(to, n);
}

static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
{
        if (unlikely(!access_ok(VERIFY_READ, from, n))) {
                memset(to, 0, n);
                return n;
        }
        if (__builtin_constant_p(n))
                return __constant_copy_from_user(to, from, n);
        else
                return __copy_user_zeroing(to, from, n);
}

static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
{
        if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
                return n;
        if (__builtin_constant_p(n))
                return __constant_copy_to_user(to, from, n);
        else
                return __copy_user(to, from, n);
}

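/*
 * Illustrative sketch (not part of the original header): copy_from_user()
 * and copy_to_user() return the number of bytes that could NOT be copied,
 * so a return of 0 means success.  The helper below is hypothetical.
 */
static inline int __example_fetch_args(void *kbuf, const void __user *ubuf,
                                       size_t len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;         /* partial or no copy */
        return 0;                       /* all len bytes copied */
}
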
/* We keep the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */

static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void __user *from,
                                 unsigned long n)
{
        return __copy_user_zeroing(to, from, n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void __user *to, const void *from,
                               unsigned long n)
{
        return __copy_user(to, from, n);
}

static inline unsigned long
__generic_clear_user_nocheck(void __user *to, unsigned long n)
{
        return __do_clear_user(to, n);
}

/* without checking */

#define __copy_to_user(to, from, n) \
        __generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
        __generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))

#define strlen_user(str)        strnlen_user((str), 0x7ffffffe)

#endif  /* __ASSEMBLY__ */

#endif  /* _CRIS_UACCESS_H */
