
TOMOYO Linux Cross Reference
Linux/arch/mips/include/asm/uaccess.h


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

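/*
 * A minimal sketch of how these are conventionally used (the helper and
 * buffer names below are hypothetical, not part of this header): code
 * that wants the user-access routines to accept kernel pointers widens
 * the limit temporarily and must always restore the old value.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = do_user_copy((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 */
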
#define segment_eq(a, b)        ((a).seg == (b).seg)

/*
 * eva_kernel_access() - determine whether a kernel memory access is in
 * progress on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
        if (!IS_ENABLED(CONFIG_EVA))
                return false;

        return segment_eq(get_fs(), get_ds());
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

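/*
 * What the trick buys us, sketched on two illustrative calls: for a
 * positive compile-time constant the ternary folds to 0, so the size
 * term drops out of the __access_ok() arithmetic below entirely.
 *
 *	__ua_size(sizeof(int))	->	0	(constant, known positive)
 *	__ua_size(len)		->	(len)	(runtime value, still checked)
 */
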
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)                                   \
({                                                                      \
        unsigned long __addr = (unsigned long) (addr);                  \
        unsigned long __size = size;                                    \
        unsigned long __mask = mask;                                    \
        unsigned long __ok;                                             \
                                                                        \
        __chk_user_ptr(addr);                                           \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
                __ua_size(__size)));                                    \
        __ok == 0;                                                      \
})

#define access_ok(type, addr, size)                                     \
        likely(__access_ok((addr), (size), __access_mask))

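/*
 * Usage sketch (ubuf and count are hypothetical ioctl-style arguments):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, count))
 *		return -EFAULT;
 *
 * A passing check only means the range lies below the segment limit;
 * the actual copy may still fault and return -EFAULT.
 */
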
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

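/*
 * Example, assuming a hypothetical int __user *uptr argument; the store
 * width is selected by sizeof(*uptr) via __put_user_check():
 *
 *	int val = 42;
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
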
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

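/*
 * Example, assuming a hypothetical u32 __user *uptr argument; on a
 * faulting access the destination is zeroed and -EFAULT is returned:
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */
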
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

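/*
 * The __-prefixed variants skip the access_ok() check, so a caller that
 * has validated a whole array once can use them in the loop body.  A
 * sketch with hypothetical uarr/n/tmp/sum variables:
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(tmp, uarr + i))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */
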
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd                  _loadw
#else
#define _loadd(reg, addr)       "ld " reg ", " addr
#endif
#define _loadw(reg, addr)       "lw " reg ", " addr
#define _loadh(reg, addr)       "lh " reg ", " addr
#define _loadb(reg, addr)       "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)                             \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, _loadb, ptr); break;                \
        case 2: __get_data_asm(val, _loadh, ptr); break;                \
        case 4: __get_data_asm(val, _loadw, ptr); break;                \
        case 8: __GET_DW(val, _loadd, ptr); break;                      \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_data_asm(val, user_lh, ptr); break;               \
        case 4: __get_data_asm(val, user_lw, ptr); break;               \
        case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

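/*
 * Note that `size` always arrives as a sizeof() constant, so the switch
 * above is resolved at compile time: get_user(x, (u16 __user *)p) reaches
 * this macro with size == 2 and compiles down to just the user_lh case.
 */
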
#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        if (eva_kernel_access()) {                                      \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __get_user_common((x), size, ptr);                      \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {           \
                if (eva_kernel_access())                                \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
        } else                                                          \
                (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

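/*
 * Rough sketch of how the fixup fires: the __ex_table entry pairs the
 * address of the load at label 1 with the fixup code at label 3.  On a
 * bad access, the MIPS fault handler does, conceptually:
 *
 *	fixup = search_exception_tables(exception_epc(regs));
 *	if (fixup)
 *		regs->cp0_epc = fixup->nextinsn;
 *
 * and the fixup then sets the error register to -EFAULT and zeroes the
 * destination before jumping back to label 2.
 */
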
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)                            \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn("%1", "(%3)")"                           \n"     \
        "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored                 _storew
#else
#define _stored(reg, addr)      "sd " reg ", " addr
#endif

#define _storew(reg, addr)      "sw " reg ", " addr
#define _storeh(reg, addr)      "sh " reg ", " addr
#define _storeb(reg, addr)      "sb " reg ", " addr

#define __put_kernel_common(ptr, size)                                  \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(_storeb, ptr); break;                    \
        case 2: __put_data_asm(_storeh, ptr); break;                    \
        case 4: __put_data_asm(_storew, ptr); break;                    \
        case 8: __PUT_DW(_stored, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)                                    \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_data_asm(user_sh, ptr); break;                    \
        case 4: __put_data_asm(user_sw, ptr); break;                    \
        case 8: __PUT_DW(user_sd, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        if (eva_kernel_access()) {                                      \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {         \
                if (eva_kernel_access())                                \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
        }                                                               \
                                                                        \
        __pu_err;                                                       \
})

#define __put_data_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_data_asm_ll32(insn, ptr)                                  \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
        "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)       \
        __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
        __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
        __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
        __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

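/*
 * Example: fetching a u32 packed at an odd offset in a userspace blob
 * (ubuf is hypothetical) without risking an address-error exception;
 * on MIPS this compiles to ulw, which tolerates any alignment:
 *
 *	u32 v;
 *
 *	if (get_user_unaligned(v, (u32 __user *)(ubuf + 1)))
 *		return -EFAULT;
 */
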
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)                     \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_unaligned_asm(val, "lb", ptr); break;        \
        case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __get_user_unaligned_common((x), size, ptr);                    \
        __gu_err;                                                       \
})

#define __get_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))             \
                __get_user_unaligned_common((x), size, __gu_ptr);       \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_unaligned_asm(val, insn, addr)                       \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)                        \
{                                                                       \
        unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)                          \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_user_unaligned_asm("sb", ptr); break;             \
        case 2: __put_user_unaligned_asm("ush", ptr); break;            \
        case 4: __put_user_unaligned_asm("usw", ptr); break;            \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
        default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        __put_user_unaligned_common(ptr, size);                         \
        __pu_err;                                                       \
})

#define __put_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))           \
                __put_user_unaligned_common(__pu_addr, size);           \
                                                                        \
        __pu_err;                                                       \
})

#define __put_user_unaligned_asm(insn, ptr)                             \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_unaligned_asm_ll32(ptr)                              \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which may be outside the range of
 * the jump instruction.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
                                              defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)                              \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

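/*
 * The explicit register variables pin the arguments to $4-$6 (a0-a2 in
 * both the o32 and n64 ABIs), matching the C calling convention that the
 * handwritten __copy_user routine expects; the clobber list names every
 * other register that routine is free to scribble on.
 */
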
#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

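/*
 * Usage sketch (kbuf, ubuf and len are hypothetical): the caller owns
 * the access_ok() check, and the return value counts bytes NOT copied:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */
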
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)                            \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

#define __copy_from_user_inatomic(to, from, n)                          \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
                                                              __cu_from,\
                                                              __cu_len);\
        else                                                            \
                __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
                                                            __cu_from,  \
                                                            __cu_len);  \
        __cu_len;                                                       \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access()) {                                      \
                __cu_len = __invoke_copy_to_kernel(__cu_to,             \
                                                   __cu_from,           \
                                                   __cu_len);           \
        } else {                                                        \
                if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
                        might_fault();                                  \
                        __cu_len = __invoke_copy_to_user(__cu_to,       \
                                                         __cu_from,     \
                                                         __cu_len);     \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
})

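/*
 * Example of the "bytes left" convention in a read()-style handler
 * (kbuf, ubuf and len are hypothetical); a fault partway through shows
 * up as a nonzero remainder, not an error code:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */
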
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)                            \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user_inatomic)                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
        __invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
                                       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
                                   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
                                 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)      \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(func_ptr)                                          \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)        \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(func_ptr)                                          \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

1064 /*
1065  * Source or destination address is in userland, so accesses must go
1066  * through the TLB.
1067  */
1068 #define __invoke_copy_from_user(to, from, n)                            \
1069         __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1070 
1071 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
1072         __invoke_copy_from_user_eva_generic(to, from, n,                \
1073                                             __copy_user_inatomic_eva)
1074 
1075 #define __invoke_copy_to_user(to, from, n)                              \
1076         __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1077 
1078 #define ___invoke_copy_in_user(to, from, n)                             \
1079         __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1080 
1081 /*
1082  * Source or destination address is in the kernel, so we do not go
1083  * through the TLB.
1084  */
1085 #define __invoke_copy_from_kernel(to, from, n)                          \
1086         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1087 
1088 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
1089         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1090 
1091 #define __invoke_copy_to_kernel(to, from, n)                            \
1092         __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1093 
1094 #define ___invoke_copy_in_kernel(to, from, n)                           \
1095         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1096 
1097 #endif /* CONFIG_EVA */
1098 
1099 /*
1100  * __copy_from_user: - Copy a block of data from user space, with less checking.
1101  * @to:   Destination address, in kernel space.
1102  * @from: Source address, in user space.
1103  * @n:    Number of bytes to copy.
1104  *
1105  * Context: User context only. This function may sleep if pagefaults are
1106  *          enabled.
1107  *
1108  * Copy data from user space to kernel space.  Caller must check
1109  * the specified block with access_ok() before calling this function.
1110  *
1111  * Returns number of bytes that could not be copied.
1112  * On success, this will be zero.
1113  *
1114  * If some data could not be copied, this function will pad the copied
1115  * data to the requested size using zero bytes.
1116  */
1117 #define __copy_from_user(to, from, n)                                   \
1118 ({                                                                      \
1119         void *__cu_to;                                                  \
1120         const void __user *__cu_from;                                   \
1121         long __cu_len;                                                  \
1122                                                                         \
1123         __cu_to = (to);                                                 \
1124         __cu_from = (from);                                             \
1125         __cu_len = (n);                                                 \
1126         if (eva_kernel_access()) {                                      \
1127                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1128                                                      __cu_from,         \
1129                                                      __cu_len);         \
1130         } else {                                                        \
1131                 might_fault();                                          \
1132                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
1133                                                    __cu_len);           \
1134         }                                                               \
1135         __cu_len;                                                       \
1136 })
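/*
 * Usage sketch (illustrative only; the struct and function names are
 * invented for the example): a caller that has already validated the
 * range with access_ok() may use the unchecked variant directly.
 *
 *	struct my_args { int fd; long flags; };
 *
 *	static int fetch_args(struct my_args *kargs,
 *			      const struct my_args __user *uargs)
 *	{
 *		if (!access_ok(VERIFY_READ, uargs, sizeof(*uargs)))
 *			return -EFAULT;
 *		if (__copy_from_user(kargs, uargs, sizeof(*kargs)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * The nonzero return mapped to -EFAULT above is the number of bytes
 * that could not be copied.
 */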
1137 
1138 /*
1139  * copy_from_user: - Copy a block of data from user space.
1140  * @to:   Destination address, in kernel space.
1141  * @from: Source address, in user space.
1142  * @n:    Number of bytes to copy.
1143  *
1144  * Context: User context only. This function may sleep if pagefaults are
1145  *          enabled.
1146  *
1147  * Copy data from user space to kernel space.
1148  *
1149  * Returns number of bytes that could not be copied.
1150  * On success, this will be zero.
1151  *
1152  * If some data could not be copied, this function will pad the copied
1153  * data to the requested size using zero bytes.
1154  */
1155 #define copy_from_user(to, from, n)                                     \
1156 ({                                                                      \
1157         void *__cu_to;                                                  \
1158         const void __user *__cu_from;                                   \
1159         long __cu_len;                                                  \
1160                                                                         \
1161         __cu_to = (to);                                                 \
1162         __cu_from = (from);                                             \
1163         __cu_len = (n);                                                 \
1164         if (eva_kernel_access()) {                                      \
1165                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1166                                                      __cu_from,         \
1167                                                      __cu_len);         \
1168         } else {                                                        \
1169                 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
1170                         might_fault();                                  \
1171                         __cu_len = __invoke_copy_from_user(__cu_to,     \
1172                                                            __cu_from,   \
1173                                                            __cu_len);   \
1174                 } else {                                                \
1175                         memset(__cu_to, 0, __cu_len);                   \
1176                 }                                                       \
1177         }                                                               \
1178         __cu_len;                                                       \
1179 })
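/*
 * Usage sketch (illustrative only; names invented): unlike the
 * double-underscore variant, copy_from_user() performs the access_ok()
 * check itself, so e.g. a write() handler can call it directly:
 *
 *	static ssize_t my_write(struct file *file, const char __user *buf,
 *				size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64];
 *
 *		count = min_t(size_t, count, sizeof(kbuf));
 *		if (copy_from_user(kbuf, buf, count))
 *			return -EFAULT;
 *		return count;
 *	}
 *
 * If the user range is invalid, the destination is still zero-filled to
 * the requested size, as documented above.
 */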
1180 
1181 #define __copy_in_user(to, from, n)                                     \
1182 ({                                                                      \
1183         void __user *__cu_to;                                           \
1184         const void __user *__cu_from;                                   \
1185         long __cu_len;                                                  \
1186                                                                         \
1187         __cu_to = (to);                                                 \
1188         __cu_from = (from);                                             \
1189         __cu_len = (n);                                                 \
1190         if (eva_kernel_access()) {                                      \
1191                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1192                                                     __cu_len);          \
1193         } else {                                                        \
1194                 might_fault();                                          \
1195                 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
1196                                                   __cu_len);            \
1197         }                                                               \
1198         __cu_len;                                                       \
1199 })
1200 
1201 #define copy_in_user(to, from, n)                                       \
1202 ({                                                                      \
1203         void __user *__cu_to;                                           \
1204         const void __user *__cu_from;                                   \
1205         long __cu_len;                                                  \
1206                                                                         \
1207         __cu_to = (to);                                                 \
1208         __cu_from = (from);                                             \
1209         __cu_len = (n);                                                 \
1210         if (eva_kernel_access()) {                                      \
1211                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1212                                                     __cu_len);          \
1213         } else {                                                        \
1214                 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1215                            access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1216                         might_fault();                                  \
1217                         __cu_len = ___invoke_copy_in_user(__cu_to,      \
1218                                                           __cu_from,    \
1219                                                           __cu_len);    \
1220                 }                                                       \
1221         }                                                               \
1222         __cu_len;                                                       \
1223 })
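/*
 * Usage sketch (illustrative only; names invented): copy_in_user()
 * moves data between two userspace buffers without bouncing through a
 * kernel buffer, as compat syscall glue sometimes needs when rewriting
 * part of a user structure in place:
 *
 *	if (copy_in_user(&new_rec->flags, &old_rec->flags,
 *			 sizeof(new_rec->flags)))
 *		return -EFAULT;
 *
 * As with the other helpers, the return value is the number of bytes
 * that could not be copied, so zero means success.
 */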
1224 
1225 /*
1226  * __clear_user: - Zero a block of memory in user space, with less checking.
1227  * @to:   Destination address, in user space.
1228  * @n:    Number of bytes to zero.
1229  *
1230  * Zero a block of memory in user space.  Caller must check
1231  * the specified block with access_ok() before calling this function.
1232  *
1233  * Returns number of bytes that could not be cleared.
1234  * On success, this will be zero.
1235  */
1236 static inline __kernel_size_t
1237 __clear_user(void __user *addr, __kernel_size_t size)
1238 {
1239         __kernel_size_t res;
1240 
1241         if (eva_kernel_access()) {
1242                 __asm__ __volatile__(
1243                         "move\t$4, %1\n\t"
1244                         "move\t$5, $0\n\t"
1245                         "move\t$6, %2\n\t"
1246                         __MODULE_JAL(__bzero_kernel)
1247                         "move\t%0, $6"
1248                         : "=r" (res)
1249                         : "r" (addr), "r" (size)
1250                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1251         } else {
1252                 might_fault();
1253                 __asm__ __volatile__(
1254                         "move\t$4, %1\n\t"
1255                         "move\t$5, $0\n\t"
1256                         "move\t$6, %2\n\t"
1257                         __MODULE_JAL(__bzero)
1258                         "move\t%0, $6"
1259                         : "=r" (res)
1260                         : "r" (addr), "r" (size)
1261                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1262         }
1263 
1264         return res;
1265 }
1266 
1267 #define clear_user(addr,n)                                              \
1268 ({                                                                      \
1269         void __user * __cl_addr = (addr);                               \
1270         unsigned long __cl_size = (n);                                  \
1271         if (__cl_size && access_ok(VERIFY_WRITE,                        \
1272                                         __cl_addr, __cl_size))          \
1273                 __cl_size = __clear_user(__cl_addr, __cl_size);         \
1274         __cl_size;                                                      \
1275 })
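/*
 * Usage sketch (illustrative only): clear_user() is typically used to
 * zero the tail of a user buffer the kernel filled only partially, e.g.
 * in a read() handler:
 *
 *	if (copied < count && clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 *
 * clear_user() performs the access_ok() check itself; __clear_user()
 * skips it and must only be called on a pre-validated range.
 */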
1276 
1277 /*
1278  * __strncpy_from_user: - Copy a NUL-terminated string from userspace, with less checking.
1279  * @dst:   Destination address, in kernel space.  This buffer must be at
1280  *         least @count bytes long.
1281  * @src:   Source address, in user space.
1282  * @count: Maximum number of bytes to copy, including the trailing NUL.
1283  *
1284  * Copies a NUL-terminated string from userspace to kernel space.
1285  * Caller must check the specified block with access_ok() before calling
1286  * this function.
1287  *
1288  * On success, returns the length of the string (not including the trailing
1289  * NUL).
1290  *
1291  * If access to userspace fails, returns -EFAULT (some data may have been
1292  * copied).
1293  *
1294  * If @count is smaller than the length of the string, copies @count bytes
1295  * and returns @count.
1296  */
1297 static inline long
1298 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1299 {
1300         long res;
1301 
1302         if (eva_kernel_access()) {
1303                 __asm__ __volatile__(
1304                         "move\t$4, %1\n\t"
1305                         "move\t$5, %2\n\t"
1306                         "move\t$6, %3\n\t"
1307                         __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1308                         "move\t%0, $2"
1309                         : "=r" (res)
1310                         : "r" (__to), "r" (__from), "r" (__len)
1311                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1312         } else {
1313                 might_fault();
1314                 __asm__ __volatile__(
1315                         "move\t$4, %1\n\t"
1316                         "move\t$5, %2\n\t"
1317                         "move\t$6, %3\n\t"
1318                         __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1319                         "move\t%0, $2"
1320                         : "=r" (res)
1321                         : "r" (__to), "r" (__from), "r" (__len)
1322                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1323         }
1324 
1325         return res;
1326 }
1327 
1328 /*
1329  * strncpy_from_user: - Copy a NUL-terminated string from userspace.
1330  * @dst:   Destination address, in kernel space.  This buffer must be at
1331  *         least @count bytes long.
1332  * @src:   Source address, in user space.
1333  * @count: Maximum number of bytes to copy, including the trailing NUL.
1334  *
1335  * Copies a NUL-terminated string from userspace to kernel space.
1336  *
1337  * On success, returns the length of the string (not including the trailing
1338  * NUL).
1339  *
1340  * If access to userspace fails, returns -EFAULT (some data may have been
1341  * copied).
1342  *
1343  * If @count is smaller than the length of the string, copies @count bytes
1344  * and returns @count.
1345  */
1346 static inline long
1347 strncpy_from_user(char *__to, const char __user *__from, long __len)
1348 {
1349         long res;
1350 
1351         if (eva_kernel_access()) {
1352                 __asm__ __volatile__(
1353                         "move\t$4, %1\n\t"
1354                         "move\t$5, %2\n\t"
1355                         "move\t$6, %3\n\t"
1356                         __MODULE_JAL(__strncpy_from_kernel_asm)
1357                         "move\t%0, $2"
1358                         : "=r" (res)
1359                         : "r" (__to), "r" (__from), "r" (__len)
1360                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1361         } else {
1362                 might_fault();
1363                 __asm__ __volatile__(
1364                         "move\t$4, %1\n\t"
1365                         "move\t$5, %2\n\t"
1366                         "move\t$6, %3\n\t"
1367                         __MODULE_JAL(__strncpy_from_user_asm)
1368                         "move\t%0, $2"
1369                         : "=r" (res)
1370                         : "r" (__to), "r" (__from), "r" (__len)
1371                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1372         }
1373 
1374         return res;
1375 }
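/*
 * Usage sketch (illustrative only; buffer names invented): fetching a
 * short user-supplied name while distinguishing faults from truncation:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *
 * A negative return is -EFAULT; a return equal to the buffer size means
 * the string did not fit, in which case the copy is not NUL-terminated.
 * The double-underscore variant above behaves the same but requires the
 * caller to have checked the range with access_ok() first.
 */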
1376 
1377 /*
1378  * strlen_user: - Get the size of a string in user space.
1379  * @s: The string to measure.
1380  *
1381  * Context: User context only. This function may sleep if pagefaults are
1382  *          enabled.
1383  *
1384  * Get the size of a NUL-terminated string in user space.
1385  *
1386  * Returns the size of the string INCLUDING the terminating NUL.
1387  * On exception, returns 0.
1388  *
1389  * If there is a limit on the length of a valid string, you may wish to
1390  * consider using strnlen_user() instead.
1391  */
1392 static inline long strlen_user(const char __user *s)
1393 {
1394         long res;
1395 
1396         if (eva_kernel_access()) {
1397                 __asm__ __volatile__(
1398                         "move\t$4, %1\n\t"
1399                         __MODULE_JAL(__strlen_kernel_asm)
1400                         "move\t%0, $2"
1401                         : "=r" (res)
1402                         : "r" (s)
1403                         : "$2", "$4", __UA_t0, "$31");
1404         } else {
1405                 might_fault();
1406                 __asm__ __volatile__(
1407                         "move\t$4, %1\n\t"
1408                         __MODULE_JAL(__strlen_user_asm)
1409                         "move\t%0, $2"
1410                         : "=r" (res)
1411                         : "r" (s)
1412                         : "$2", "$4", __UA_t0, "$31");
1413         }
1414 
1415         return res;
1416 }
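/*
 * Usage sketch (illustrative only): since strlen_user() returns 0 on a
 * fault and the length including the NUL otherwise, callers must map 0
 * to an error and subtract one to get the C string length:
 *
 *	long sz = strlen_user(ustr);
 *
 *	if (!sz)
 *		return -EFAULT;
 *	len = sz - 1;
 *
 * Because nothing bounds how far the scan may run, strnlen_user() below
 * is normally the safer choice, as the comment above already suggests.
 */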
1417 
1418 /* Returns: 0 if bad, string length + 1 (i.e. the memory size, including the NUL) if ok */
1419 static inline long __strnlen_user(const char __user *s, long n)
1420 {
1421         long res;
1422 
1423         if (eva_kernel_access()) {
1424                 __asm__ __volatile__(
1425                         "move\t$4, %1\n\t"
1426                         "move\t$5, %2\n\t"
1427                         __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1428                         "move\t%0, $2"
1429                         : "=r" (res)
1430                         : "r" (s), "r" (n)
1431                         : "$2", "$4", "$5", __UA_t0, "$31");
1432         } else {
1433                 might_fault();
1434                 __asm__ __volatile__(
1435                         "move\t$4, %1\n\t"
1436                         "move\t$5, %2\n\t"
1437                         __MODULE_JAL(__strnlen_user_nocheck_asm)
1438                         "move\t%0, $2"
1439                         : "=r" (res)
1440                         : "r" (s), "r" (n)
1441                         : "$2", "$4", "$5", __UA_t0, "$31");
1442         }
1443 
1444         return res;
1445 }
1446 
1447 /*
1448  * strnlen_user: - Get the size of a string in user space.
1449  * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
1450  *
1451  * Context: User context only. This function may sleep if pagefaults are
1452  *          enabled.
1453  *
1454  * Get the size of a NUL-terminated string in user space.
1455  *
1456  * Returns the size of the string INCLUDING the terminating NUL.
1457  * On exception, returns 0.
1458  * If the string is too long, returns a value greater than @n.
1459  */
1460 static inline long strnlen_user(const char __user *s, long n)
1461 {
1462         long res;
1463 
1465         if (eva_kernel_access()) {
1466                 __asm__ __volatile__(
1467                         "move\t$4, %1\n\t"
1468                         "move\t$5, %2\n\t"
1469                         __MODULE_JAL(__strnlen_kernel_asm)
1470                         "move\t%0, $2"
1471                         : "=r" (res)
1472                         : "r" (s), "r" (n)
1473                         : "$2", "$4", "$5", __UA_t0, "$31");
1474         } else {
                 might_fault();
1475                 __asm__ __volatile__(
1476                         "move\t$4, %1\n\t"
1477                         "move\t$5, %2\n\t"
1478                         __MODULE_JAL(__strnlen_user_asm)
1479                         "move\t%0, $2"
1480                         : "=r" (res)
1481                         : "r" (s), "r" (n)
1482                         : "$2", "$4", "$5", __UA_t0, "$31");
1483         }
1484 
1485         return res;
1486 }
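/*
 * Usage sketch (illustrative only; names invented): sizing an
 * allocation before copying a user string.  Remember the return value
 * counts the NUL, is 0 on a fault and exceeds the limit when the string
 * does not fit:
 *
 *	long sz = strnlen_user(ustr, PATH_MAX);
 *	char *kstr;
 *
 *	if (!sz)
 *		return -EFAULT;
 *	if (sz > PATH_MAX)
 *		return -ENAMETOOLONG;
 *	kstr = kmalloc(sz, GFP_KERNEL);
 *	if (!kstr)
 *		return -ENOMEM;
 *	if (strncpy_from_user(kstr, ustr, sz) < 0) {
 *		kfree(kstr);
 *		return -EFAULT;
 *	}
 *
 * The copy is bounded by sz as well because userspace may change the
 * string between the two calls.
 */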
1487 
1488 struct exception_table_entry
1489 {
1490         unsigned long insn;
1491         unsigned long nextinsn;
1492 };
1493 
1494 extern int fixup_exception(struct pt_regs *regs);
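/*
 * Conceptual sketch of how the table is consumed (the real code lives
 * in arch/mips/mm/extable.c and kernel/extable.c): each entry pairs the
 * address of an instruction that may fault (insn) with the address to
 * resume at (nextinsn).  On a bad user access the trap handler calls
 * fixup_exception(), which does roughly:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(exception_epc(regs));
 *	if (e) {
 *		regs->cp0_epc = e->nextinsn;
 *		return 1;
 *	}
 *	return 0;
 *
 * search_exception_tables() binary-searches the sorted __ex_table
 * section that the fixup annotations in this file's asm blocks and in
 * the assembler copy routines populate.
 */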
1495 
1496 #endif /* _ASM_UACCESS_H */
1497 
