Linux/kernel/locking/percpu-rwsem.c

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
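
/*
 * Usage sketch (illustrative, not part of the original file; the
 * struct and function names are hypothetical).  Callers normally go
 * through the percpu_init_rwsem() wrapper from <linux/percpu-rwsem.h>,
 * which supplies the lock_class_key for lockdep:
 *
 *	struct foo_state {
 *		struct percpu_rw_semaphore rwsem;
 *		int val;
 *	};
 *
 *	static int foo_state_init(struct foo_state *fs)
 *	{
 *		return percpu_init_rwsem(&fs->rwsem);
 *	}
 *
 *	static void foo_state_destroy(struct foo_state *fs)
 *	{
 *		percpu_free_rwsem(&fs->rwsem);
 *	}
 *
 * percpu_init_rwsem() returns -ENOMEM when alloc_percpu() fails, so
 * foo_state_init() propagates that to its caller.
 */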

/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails, the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in that case we have the necessary barriers.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}
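
/*
 * Note (editorial addition, not in the original file): the
 * preempt_disable()/preempt_enable() pair above is what ties the fast
 * path to the writer.  With RCU_SCHED_SYNC, a preemption-disabled
 * region acts as an RCU-sched read-side critical section, so once
 * rcu_sync_enter() in percpu_down_write() has waited for a grace
 * period, every reader that observed rcu_sync_is_idle() == T has also
 * finished its __this_cpu_add(); later readers see the non-idle state
 * and fall back to ->rw_sem.
 */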

/*
 * Like the normal down_read(), this is not recursive: a writer can
 * come in after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);
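
/*
 * Reader-side usage sketch (illustrative, not from the original file;
 * names are hypothetical).  The read-side section may sleep, but must
 * not nest in the same task, per the comment above percpu_down_read():
 *
 *	static int foo_read(struct foo_state *fs)
 *	{
 *		int val;
 *
 *		percpu_down_read(&fs->rwsem);
 *		val = fs->val;
 *		percpu_up_read(&fs->rwsem);
 *		return val;
 *	}
 */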

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}
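
/*
 * Trylock usage sketch (illustrative, not from the original file):
 * as with down_read_trylock(), the return value is 1 on success and
 * 0 on failure, so a caller that cannot wait out a writer can back
 * off instead of blocking:
 *
 *	if (!percpu_down_read_trylock(&fs->rwsem))
 *		return -EBUSY;
 *	val = fs->val;
 *	percpu_up_read(&fs->rwsem);
 */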

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/*
	 * A false-positive wakeup is possible but harmless; the
	 * writer's wait_event() rechecks its condition.
	 */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}
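
/*
 * Note (editorial addition, not in the original file): individual
 * fast_read_ctr slots can be "wrong" in isolation; only the sum is
 * meaningful.  For example, a reader that migrates between CPUs:
 *
 *	CPU0				CPU1
 *	percpu_down_read():
 *	  __this_cpu_add(ctr, +1)
 *					(task migrates, then)
 *					percpu_up_read():
 *					  __this_cpu_add(ctr, -1)
 *
 * leaves {+1, -1} in the per-cpu slots; the sum, 0, correctly says no
 * reader is active.  clear_fast_ctr() computes exactly this sum, and
 * it can do so safely because its caller holds ->rw_sem for writing
 * and the fast path has already been disabled by rcu_sync_enter().
 */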

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for a GP pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another GP pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
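
/*
 * Writer-side usage sketch (illustrative, not from the original file;
 * names are hypothetical).  Writers pay for the cheap readers: entry
 * typically waits for an RCU-sched grace period in rcu_sync_enter(),
 * and readers only regain the fast path a further grace period after
 * rcu_sync_exit().  This suits data that changes rarely:
 *
 *	static void foo_update(struct foo_state *fs, int new_val)
 *	{
 *		percpu_down_write(&fs->rwsem);
 *		fs->val = new_val;
 *		percpu_up_write(&fs->rwsem);
 *	}
 *
 * All readers are fully excluded between percpu_down_write() and
 * percpu_up_write(), so fs->val can be updated without further
 * synchronisation.
 */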