Linux/arch/tile/include/asm/uaccess.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

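/*
 * Illustrative sketch (not part of the original header): kernel code that
 * needs to pass a kernel buffer through an API that expects user pointers
 * can temporarily widen the address limit.  The save/restore discipline is
 * the standard pattern; the surrounding code is hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *      set_fs(KERNEL_DS);
 *      ... call code that applies access_ok()-style checks to what
 *          are really kernel pointers ...
 *      set_fs(old_fs);
 */
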
#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
                                         unsigned long size)
{
        return (addr >= MEM_USER_INTRPT &&
                addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
                size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
        __chk_user_ptr(addr); \
        likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})

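/*
 * Illustrative sketch (assumption, not from the original source): a
 * hypothetical handler validating a user buffer once, then using the
 * unchecked copy routine declared later in this header.
 *
 *      if (!access_ok(VERIFY_READ, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */
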
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

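/*
 * Illustrative sketch (assumption): fixup_exception() lives in the arch
 * fault-handling code and works roughly along these lines, using the
 * generic search_exception_tables() helper to map a faulting PC to its
 * out-of-line continuation.
 *
 *      const struct exception_table_entry *fixup;
 *      fixup = search_exception_tables(regs->pc);
 *      if (fixup) {
 *              regs->pc = fixup->fixup;
 *              return 1;
 *      }
 *      return 0;
 */
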
/*
 * Support macros for __get_user().
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type.  This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 *
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR        ".quad"
#else
#define _ASM_PTR        ".long"
#endif

#define __get_user_asm(OP, x, ptr, ret)                                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %1, 0; movei %0, %3 }\n"               \
                     "j 9f\n"                                           \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret), "=r" (x)                             \
                     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)                                       \
        ({                                                              \
                unsigned int __a, __b;                                  \
                asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"       \
                             "2: { lw %2, %2; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %1, 0; movei %2, 0 }\n"        \
                             "{ movei %0, %4; j 9f }\n"                 \
                             ".section __ex_table,\"a\"\n"              \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=r" (ret), "=r" (__a), "=&r" (__b)      \
                             : "r" (ptr), "i" (-EFAULT));               \
                (x) = (__typeof(x))(__typeof((x)-(x)))                  \
                        (((u64)__hi32(__a, __b) << 32) |                \
                         __lo32(__a, __b));                             \
        })
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)                                              \
        ({                                                              \
                int __ret;                                              \
                __chk_user_ptr(ptr);                                    \
                switch (sizeof(*(ptr))) {                               \
                case 1: __get_user_1(x, ptr, __ret); break;             \
                case 2: __get_user_2(x, ptr, __ret); break;             \
                case 4: __get_user_4(x, ptr, __ret); break;             \
                case 8: __get_user_8(x, ptr, __ret); break;             \
                default: __ret = __get_user_bad(); break;               \
                }                                                       \
                __ret;                                                  \
        })

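/*
 * Illustrative sketch (assumption): typical use after an explicit
 * access_ok() check, in hypothetical caller code.
 *
 *      u32 val;
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__get_user(val, uptr))
 *              return -EFAULT;
 */
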
/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)                 \
        asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"              \
                     ".pushsection .fixup,\"ax\"\n"                     \
                     "0: { movei %0, %3; j 9f }\n"                      \
                     ".section __ex_table,\"a\"\n"                      \
                     _ASM_PTR " 1b, 0b\n"                               \
                     ".popsection\n"                                    \
                     "9:"                                               \
                     : "=r" (ret)                                       \
                     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)                                       \
        ({                                                              \
                u64 __x = (__typeof((x)-(x)))(x);                       \
                int __lo = (int) __x, __hi = (int) (__x >> 32);         \
                asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"       \
                             "2: { sw %0, %3; movei %0, 0 }\n"          \
                             ".pushsection .fixup,\"ax\"\n"             \
                             "0: { movei %0, %4; j 9f }\n"              \
                             ".section __ex_table,\"a\"\n"              \
                             ".word 1b, 0b\n"                           \
                             ".word 2b, 0b\n"                           \
                             ".popsection\n"                            \
                             "9:"                                       \
                             : "=&r" (ret)                              \
                             : "r" (ptr), "r" (__lo32(__lo, __hi)),     \
                             "r" (__hi32(__lo, __hi)), "i" (-EFAULT));  \
        })
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
({                                                                      \
        int __ret;                                                      \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_1(x, ptr, __ret); break;                     \
        case 2: __put_user_2(x, ptr, __ret); break;                     \
        case 4: __put_user_4(x, ptr, __ret); break;                     \
        case 8: __put_user_8(x, ptr, __ret); break;                     \
        default: __ret = __put_user_bad(); break;                       \
        }                                                               \
        __ret;                                                          \
})

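/*
 * Illustrative sketch (assumption): the mirror image of the __get_user()
 * example above, again with a caller-supplied access_ok() check.
 *
 *      if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__put_user(val, uptr))
 *              return -EFAULT;
 */
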
/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__Pu_addr = (ptr);                   \
        access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?    \
                __put_user((x), (__Pu_addr)) :                          \
                -EFAULT;                                                \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) const __user *__Gu_addr = (ptr);             \
        access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?     \
                __get_user((x), (__Gu_addr)) :                          \
                ((x) = 0, -EFAULT);                                     \
})

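/*
 * Illustrative sketch (assumption): since get_user()/put_user() perform
 * their own access_ok() check, a hypothetical syscall can use them
 * directly.
 *
 *      SYSCALL_DEFINE1(example_sys, int __user *, uaddr)
 *      {
 *              int val;
 *              if (get_user(val, uaddr))
 *                      return -EFAULT;
 *              return put_user(val + 1, uaddr);
 *      }
 */
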
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
        void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}

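/*
 * Illustrative sketch (assumption): the "bytes not copied" return
 * convention means callers usually convert any nonzero result to
 * -EFAULT, e.g. in a hypothetical read() implementation:
 *
 *      if (copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 *      return len;
 */
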
/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
        void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
        void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
        __compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n)
{
        int sz = __compiletime_object_size(to);

        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
        else
                copy_from_user_overflow();

        return n;
}
#else
#define copy_from_user _copy_from_user
#endif

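/*
 * Illustrative sketch (assumption): the zero-padding contract guarantees
 * that a hypothetical structure never holds stale kernel data even if the
 * user copy faults partway through.
 *
 *      struct example_req r;   -- hypothetical structure
 *      if (copy_from_user(&r, urequest, sizeof(r)))
 *              return -EFAULT;
 */
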
#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
        void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = __copy_in_user(to, from, n);
        return n;
}
#endif

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
        might_fault();
        return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)

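/*
 * Illustrative sketch (assumption): sizing a user string before copying
 * it, in hypothetical caller code.  A return of 0 indicates a fault.
 *
 *      long len = strnlen_user(ustr, PATH_MAX);
 *      if (len == 0)
 *              return -EFAULT;
 */
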
/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * The unchecked __strncpy_from_user() variant requires the caller to
 * check the specified block with access_ok() first; strncpy_from_user()
 * performs the check itself.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        might_fault();
        return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
        char *dst, const char __user *src, long count)
{
        if (access_ok(VERIFY_READ, src, 1))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}

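/*
 * Illustrative sketch (assumption): copying a user-supplied name into a
 * fixed kernel buffer, in hypothetical caller code.  A return equal to
 * @count means the string was truncated and is not NUL-terminated.
 *
 *      char name[32];
 *      long n = strncpy_from_user(name, uname, sizeof(name));
 *      if (n < 0)
 *              return n;
 *      if (n == sizeof(name))
 *              return -ENAMETOOLONG;
 */
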
/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
        void __user *mem, unsigned long len)
{
        might_fault();
        return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __clear_user(mem, len);
        return len;
}

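/*
 * Illustrative sketch (assumption): zeroing the tail of a user buffer
 * after a short copy, in hypothetical caller code.
 *
 *      if (clear_user(ubuf + copied, len - copied))
 *              return -EFAULT;
 */
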
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = flush_user_asm(mem, len);
        mb_incoherent();
        return retval;
}

static inline unsigned long __must_check flush_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __flush_user(mem, len);
        return len;
}

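/*
 * Illustrative sketch (assumption): a hypothetical driver writing a user
 * buffer back from cache before handing it to a non-coherent device.
 *
 *      if (flush_user(ubuf, len))
 *              return -EFAULT;
 *      ... start the device transfer ...
 */
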
/**
 * inv_user: - Invalidate a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be invalidated.
 * On success, this will be zero.
 *
 * Note that on Tile64, the "inv" operation is in fact a
 * "flush and invalidate", so cache write-backs will occur prior
 * to the cache being marked invalid.
 */
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = inv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check inv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __inv_user(mem, len);
        return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
        void __user *mem, unsigned long len)
{
        int retval;

        might_fault();
        retval = finv_user_asm(mem, len);
        mb_incoherent();
        return retval;
}
static inline unsigned long __must_check finv_user(
        void __user *mem, unsigned long len)
{
        if (access_ok(VERIFY_WRITE, mem, len))
                return __finv_user(mem, len);
        return len;
}

#endif /* _ASM_TILE_UACCESS_H */