~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/mutex_64.h

Version: ~ [ linux-5.7-rc7 ] ~ [ linux-5.6.14 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.42 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.124 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.181 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.224 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.224 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.84 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Assembly implementation of the mutex fastpath, based on atomic
  3  * decrement/increment.
  4  *
  5  * started by Ingo Molnar:
  6  *
  7  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  8  */
  9 #ifndef _ASM_X86_MUTEX_64_H
 10 #define _ASM_X86_MUTEX_64_H
 11 
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_lock(atomic_t *v,
                                         void (*fail_fn)(atomic_t *))
{
        /*
         * LOCK-prefixed decrement of the counter in memory; on the
         * uncontended (non-negative) result we jump straight to the
         * exit label, otherwise fall through into the slowpath call.
         */
        asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
                          "   jns %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
                          : exit);
        fail_fn(v);
exit:
        return;
}
#else
/*
 * Fallback for compilers without asm goto: the slowpath call is embedded
 * inside the asm itself.  @v is pinned in %rdi (the first argument
 * register, so fail_fn receives it directly), and every other
 * caller-saved register is listed as clobbered because the embedded
 * "call" may trash them.  "=D" (dummy) tells the compiler %rdi itself
 * is not preserved across the asm either.
 */
#define __mutex_fastpath_lock(v, fail_fn)                       \
do {                                                            \
        unsigned long dummy;                                    \
                                                                \
        typecheck(atomic_t *, v);                               \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
                                                                \
        asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"            \
                     "   jns 1f         \n"                     \
                     "   call " #fail_fn "\n"                   \
                     "1:"                                       \
                     : "=D" (dummy)                             \
                     : "D" (v)                                  \
                     : "rax", "rsi", "rdx", "rcx",              \
                       "r8", "r9", "r10", "r11", "memory");     \
} while (0)
#endif
 50 
 51 /**
 52  *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 53  *                                 from 1 to a 0 value
 54  *  @count: pointer of type atomic_t
 55  *
 56  * Change the count from 1 to a value lower than 1. This function returns 0
 57  * if the fastpath succeeds, or -1 otherwise.
 58  */
 59 static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 60 {
 61         if (unlikely(atomic_dec_return(count) < 0))
 62                 return -1;
 63         else
 64                 return 0;
 65 }
 66 
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_unlock(atomic_t *v,
                                           void (*fail_fn)(atomic_t *))
{
        /*
         * LOCK-prefixed increment; "jg" exits on a strictly positive
         * result (no waiters), otherwise fall through into the slowpath
         * call to wake up any sleepers.
         */
        asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
                          "   jg %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
                          : exit);
        fail_fn(v);
exit:
        return;
}
#else
/*
 * Fallback for compilers without asm goto: mirror of the lock-side
 * macro above.  @v is pinned in %rdi so the embedded "call " #fail_fn
 * receives it as its first argument; all caller-saved registers are
 * clobbered because of that call, and "=D" (dummy) marks %rdi itself
 * as not preserved.
 */
#define __mutex_fastpath_unlock(v, fail_fn)                     \
do {                                                            \
        unsigned long dummy;                                    \
                                                                \
        typecheck(atomic_t *, v);                               \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
                                                                \
        asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"            \
                     "   jg 1f\n"                               \
                     "   call " #fail_fn "\n"                   \
                     "1:"                                       \
                     : "=D" (dummy)                             \
                     : "D" (v)                                  \
                     : "rax", "rsi", "rdx", "rcx",              \
                       "r8", "r9", "r10", "r11", "memory");     \
} while (0)
#endif
105 
/*
 * NOTE(review): consumed by the generic mutex code; a value of 1
 * presumably tells it the slowpath must still perform the unlock on
 * this architecture — semantics defined in kernel/mutex.c, confirm
 * there.
 */
#define __mutex_slowpath_needs_to_unlock()      1
107 
108 /**
109  * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
110  *
111  *  @count: pointer of type atomic_t
112  *  @fail_fn: fallback function
113  *
114  * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
115  * if it wasn't 1 originally. [the fallback function is never used on
116  * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
117  */
118 static inline int __mutex_fastpath_trylock(atomic_t *count,
119                                            int (*fail_fn)(atomic_t *))
120 {
121         if (likely(atomic_cmpxchg(count, 1, 0) == 1))
122                 return 1;
123         else
124                 return 0;
125 }
126 
127 #endif /* _ASM_X86_MUTEX_64_H */
128 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp