~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/mutex_32.h

Version: ~ [ linux-5.2-rc5 ] ~ [ linux-5.1.12 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.53 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.128 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.182 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.182 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.68 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Assembly implementation of the mutex fastpath, based on atomic
  3  * decrement/increment.
  4  *
  5  * started by Ingo Molnar:
  6  *
  7  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  8  */
  9 #ifndef _ASM_X86_MUTEX_32_H
 10 #define _ASM_X86_MUTEX_32_H
 11 
 12 #include <asm/alternative.h>
 13 
/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)                   \
do {                                                            \
        unsigned int dummy;                                     \
                                                                \
        typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
                                                                \
        /* Atomically decrement the count; if the result is     \
         * non-negative (sign flag clear) we own the lock and   \
         * skip to 1:.  Otherwise fall through into the         \
         * slowpath call.  Note fail_fn is entered with count   \
         * still in %eax rather than via the normal C calling   \
         * convention, and may clobber %ecx/%edx (hence the     \
         * clobber list).                                       \
         */                                                     \
        asm volatile(LOCK_PREFIX "   decl (%%eax)\n"            \
                     "   jns 1f \n"                             \
                     "   call " #fail_fn "\n"                   \
                     "1:\n"                                     \
                     : "=a" (dummy)                             \
                     : "a" (count)                              \
                     : "memory", "ecx", "edx");                 \
} while (0)
 39 
 40 
 41 /**
 42  *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 43  *                                 from 1 to a 0 value
 44  *  @count: pointer of type atomic_t
 45  *  @fail_fn: function to call if the original value was not 1
 46  *
 47  * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 48  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 49  * or anything the slow path function returns
 50  */
 51 static inline int __mutex_fastpath_lock_retval(atomic_t *count,
 52                                                int (*fail_fn)(atomic_t *))
 53 {
 54         if (unlikely(atomic_dec_return(count) < 0))
 55                 return fail_fn(count);
 56         else
 57                 return 0;
 58 }
 59 
/**
 *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value of lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
 * to return 0 otherwise.
 */
#define __mutex_fastpath_unlock(count, fail_fn)                 \
do {                                                            \
        unsigned int dummy;                                     \
                                                                \
        typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
                                                                \
        /* Atomically increment the count; if the result is     \
         * > 0 there were no waiters and we skip to 1:.         \
         * Otherwise fall through and call the slowpath to      \
         * wake a waiter.  As in __mutex_fastpath_lock,         \
         * fail_fn is entered with count still in %eax, and     \
         * %ecx/%edx are listed as clobbered.                   \
         */                                                     \
        asm volatile(LOCK_PREFIX "   incl (%%eax)\n"            \
                     "   jg     1f\n"                           \
                     "   call " #fail_fn "\n"                   \
                     "1:\n"                                     \
                     : "=a" (dummy)                             \
                     : "a" (count)                              \
                     : "memory", "ecx", "edx");                 \
} while (0)
 88 
 89 #define __mutex_slowpath_needs_to_unlock()      1
 90 
 91 /**
 92  * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 93  *
 94  *  @count: pointer of type atomic_t
 95  *  @fail_fn: fallback function
 96  *
 97  * Change the count from 1 to a value lower than 1, and return 0 (failure)
 98  * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 99  * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
100  * Additionally, if the value was < 0 originally, this function must not leave
101  * it to 0 on failure.
102  */
103 static inline int __mutex_fastpath_trylock(atomic_t *count,
104                                            int (*fail_fn)(atomic_t *))
105 {
106         /*
107          * We have two variants here. The cmpxchg based one is the best one
108          * because it never induce a false contention state.  It is included
109          * here because architectures using the inc/dec algorithms over the
110          * xchg ones are much more likely to support cmpxchg natively.
111          *
112          * If not we fall back to the spinlock based variant - that is
113          * just as efficient (and simpler) as a 'destructive' probing of
114          * the mutex state would be.
115          */
116 #ifdef __HAVE_ARCH_CMPXCHG
117         if (likely(atomic_cmpxchg(count, 1, 0) == 1))
118                 return 1;
119         return 0;
120 #else
121         return fail_fn(count);
122 #endif
123 }
124 
125 #endif /* _ASM_X86_MUTEX_32_H */
126 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp