
TOMOYO Linux Cross Reference
Linux/kernel/rcupdate.c


/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>

/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
        { .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
          .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))

/**
 * call_rcu - Queue an RCU update request.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 * @arg: argument to be passed to the update function
 *
 * The update function will be invoked as soon as all CPUs have performed
 * a context switch or been seen in the idle loop or in a user process.
 * Read-side critical sections that access data updated via call_rcu()
 * must be protected by rcu_read_lock()/rcu_read_unlock().
 */
void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
{
        int cpu;
        unsigned long flags;

        head->func = func;
        head->arg = arg;
        local_irq_save(flags);
        cpu = smp_processor_id();
        list_add_tail(&head->list, &RCU_nxtlist(cpu));
        local_irq_restore(flags);
}
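
/*
 * Editorial example, not part of the original rcupdate.c: a minimal sketch of
 * how an updater and its readers might use this interface.  The data
 * structure (struct example_node), the global pointer and the helper names
 * are hypothetical; only call_rcu(), rcu_read_lock() and rcu_read_unlock()
 * come from the RCU API itself.  Real code would also need the appropriate
 * memory barriers when publishing and dereferencing the pointer; they are
 * omitted here for brevity.
 */
struct example_node {
        int value;
        struct rcu_head rcu;            /* storage for the deferred callback */
};

static struct example_node *example_global;

static void example_free_node(void *arg)
{
        /* Runs only after every CPU has passed through a quiescent state. */
        kfree(arg);
}

static void example_update(struct example_node *new_node)
{
        struct example_node *old = example_global;

        example_global = new_node;      /* publish the new version */
        if (old)
                call_rcu(&old->rcu, example_free_node, old);
}

static int example_read(void)
{
        int val;

        rcu_read_lock();                /* readers bracket their accesses */
        val = example_global ? example_global->value : -1;
        rcu_read_unlock();
        return val;
}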

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct list_head *list)
{
        struct list_head *entry;
        struct rcu_head *head;

        while (!list_empty(list)) {
                entry = list->next;
                list_del(entry);
                head = list_entry(entry, struct rcu_head, list);
                head->func(head->arg);
        }
}

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold the rcu_ctrlblk lock.
 */
static void rcu_start_batch(long newbatch)
{
        if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch)) {
                rcu_ctrlblk.maxbatch = newbatch;
        }
        if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
            !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
                return;
        }
        rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
}
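
/*
 * Editorial sketch, not part of the original file: rcu_start_batch() relies
 * on the rcu_batch_before()/rcu_batch_after() helpers, which are defined in
 * the rcupdate.h header rather than here.  Batch numbers are longs that keep
 * counting up and may eventually wrap, so the comparison is done via a
 * signed difference rather than a plain '<' or '>'.  The hypothetical
 * helpers below only illustrate that idea (renamed to avoid colliding with
 * the real definitions).
 */
static inline int example_batch_before(long a, long b)
{
        return (a - b) < 0;     /* signed difference stays meaningful across wrap */
}

static inline int example_batch_after(long a, long b)
{
        return (a - b) > 0;
}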

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it has not already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(void)
{
        int cpu = smp_processor_id();

        if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
                return;

        /*
         * Races with the local timer interrupt - in the worst case
         * we may miss one quiescent state of that CPU. That is
         * tolerable, so there is no need to disable interrupts.
         */
        if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
                RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
                return;
        }
        if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
                return;

        spin_lock(&rcu_ctrlblk.mutex);
        if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
                goto out_unlock;

        cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
        RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
        if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
                goto out_unlock;

        rcu_ctrlblk.curbatch++;
        rcu_start_batch(rcu_ctrlblk.maxbatch);

out_unlock:
        spin_unlock(&rcu_ctrlblk.mutex);
}


/*
 * This does the RCU processing work from tasklet context.
 */
static void rcu_process_callbacks(unsigned long unused)
{
        int cpu = smp_processor_id();
        LIST_HEAD(list);

        if (!list_empty(&RCU_curlist(cpu)) &&
            rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
                list_splice(&RCU_curlist(cpu), &list);
                INIT_LIST_HEAD(&RCU_curlist(cpu));
        }

        local_irq_disable();
        if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
                list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
                INIT_LIST_HEAD(&RCU_nxtlist(cpu));
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */
                spin_lock(&rcu_ctrlblk.mutex);
                RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
                rcu_start_batch(RCU_batch(cpu));
                spin_unlock(&rcu_ctrlblk.mutex);
        } else {
                local_irq_enable();
        }
        rcu_check_quiescent_state();
        if (!list_empty(&list))
                rcu_do_batch(&list);
}

/*
 * Record a quiescent state for this CPU if the interrupted context was
 * user mode, or the idle loop outside of softirq and nested hardirq
 * context, then schedule the per-CPU RCU tasklet to advance callback
 * processing.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                RCU_qsctr(cpu)++;
        tasklet_schedule(&RCU_tasklet(cpu));
}
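
/*
 * Editorial example, not part of the original file: rcu_check_callbacks() is
 * expected to be driven from the per-CPU timer tick (update_process_times()
 * in this era of the kernel).  The hypothetical caller below only sketches
 * that relationship.
 */
static void example_timer_tick(int user_tick)
{
        int cpu = smp_processor_id();

        /* Count a quiescent state if the tick interrupted user mode or the
         * idle loop, and kick the per-CPU RCU tasklet. */
        rcu_check_callbacks(cpu, user_tick);
}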

static void __devinit rcu_online_cpu(int cpu)
{
        memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
        tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
        INIT_LIST_HEAD(&RCU_nxtlist(cpu));
        INIT_LIST_HEAD(&RCU_curlist(cpu));
}

static int __devinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
                rcu_online_cpu(cpu);
                break;
        /* Space reserved for CPU_OFFLINE :) */
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffy timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_QSCTR_INVALID.
 */
void __init rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}


/* Because complete() is declared FASTCALL, we use this wrapper */
static void wakeme_after_rcu(void *completion)
{
        complete(completion);
}

/**
 * synchronize_kernel - wait until all the CPUs have gone
 * through a "quiescent" state. It may sleep.
 */
void synchronize_kernel(void)
{
        struct rcu_head rcu;
        DECLARE_COMPLETION(completion);

        /* Will wake me after RCU finished */
        call_rcu(&rcu, wakeme_after_rcu, &completion);

        /* Wait for it */
        wait_for_completion(&completion);
}
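
/*
 * Editorial example, not part of the original file: the classic blocking
 * update pattern built on synchronize_kernel().  It reuses the hypothetical
 * struct example_node from the sketch after call_rcu() above; only
 * synchronize_kernel() itself comes from this file.
 */
static struct example_node *example_current;

static void example_replace(struct example_node *new_node)
{
        struct example_node *old = example_current;

        example_current = new_node;     /* readers now see the new version */
        synchronize_kernel();           /* wait for all pre-existing readers */
        kfree(old);                     /* no reader can still hold a reference */
}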


EXPORT_SYMBOL(call_rcu);
EXPORT_SYMBOL(synchronize_kernel);

