TOMOYO Linux Cross Reference
Linux/mm/backing-dev.c


#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

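/*
 * Note on the .ra_pages value below: with the historical VM_MAX_READAHEAD
 * of 128 (kB) and 4 kB pages (PAGE_CACHE_SIZE == 4096), the default
 * readahead window works out to 128 * 1024 / 4096 = 32 pages.
 */
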
struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
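
/*
 * For illustration (a sketch, not part of the original file): writers add
 * and remove entries under bdi_lock (see bdi_register() and
 * bdi_remove_from_list() below), while a reader only needs the RCU read
 * lock, along the lines of:
 *
 *      struct backing_dev_info *bdi;
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 *              if (bdi_has_dirty_io(bdi))
 *                      ...;
 *      }
 *      rcu_read_unlock();
 */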

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
        if (wb1 < wb2) {
                spin_lock(&wb1->list_lock);
                spin_lock_nested(&wb2->list_lock, 1);
        } else {
                spin_lock(&wb2->list_lock);
                spin_lock_nested(&wb1->list_lock, 1);
        }
}
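
/*
 * Taking the two list_locks in ascending address order gives all callers a
 * single global ordering, so concurrent bdi_lock_two(a, b) and
 * bdi_lock_two(b, a) cannot deadlock. The spin_lock_nested(..., 1)
 * annotation tells lockdep that the second acquisition of the same lock
 * class is intentional. The one caller in this file is bdi_destroy().
 */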

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
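
/*
 * For illustration, the BDI_SHOW() invocation above expands to roughly:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                                        struct device_attribute *attr,
 *                                        char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 *      static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW(read_ahead_kb) pairs read_ahead_kb_show() with the
 * read_ahead_kb_store() defined above as a 0644 sysfs attribute, exposed
 * as /sys/class/bdi/<name>/read_ahead_kb.
 */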

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&bdi->wb_lock);
        if (test_bit(BDI_registered, &bdi->state))
                queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
        spin_unlock_bh(&bdi->wb_lock);
}
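
/*
 * Why queue_delayed_work() and not mod_delayed_work()? If the work is
 * already pending, queue_delayed_work() is a no-op, so a flush that was
 * scheduled for an earlier deadline keeps that deadline. Compare
 * bdi_wb_shutdown() below, which uses mod_delayed_work(bdi_wq,
 * &bdi->wb.dwork, 0) precisely because it must override any pending timer
 * and run the work immediately.
 */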

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
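
/*
 * Block drivers typically register through this helper, so a disk whose
 * dev_t is 8:0 appears as /sys/class/bdi/8:0 (and, with CONFIG_DEBUG_FS,
 * its writeback counters under <debugfs>/bdi/8:0/stats).
 */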

/*
 * Remove bdi from the global list and shut down any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /* Make sure nobody queues further work */
        spin_lock_bh(&bdi->wb_lock);
        clear_bit(BDI_registered, &bdi->state);
        spin_unlock_bh(&bdi->wb_lock);

        /*
         * Drain work list and shutdown the delayed_work.  At this point,
         * @bdi->bdi_list is empty, telling bdi_writeback_workfn() that @bdi
         * is dying and its work_list needs to be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
        flush_delayed_work(&bdi->wb.dwork);
        WARN_ON(!list_empty(&bdi->work_list));

        /*
         * This shouldn't be necessary unless @bdi for some reason has
         * unflushed dirty IO after work_list is drained.  Do it anyway
         * just in case.
         */
        cancel_delayed_work_sync(&bdi->wb.dwork);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = &default_backing_dev_info;
        }
        spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        struct device *dev = bdi->dev;

        if (dev) {
                bdi_set_min_ratio(bdi, 0);
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);

                bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);

                spin_lock_bh(&bdi->wb_lock);
                bdi->dev = NULL;
                spin_unlock_bh(&bdi->wb_lock);

                device_unregister(dev);
        }
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        spin_lock_init(&wb->list_lock);
        INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
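
/*
 * Worked example: with 4 kB pages (PAGE_SHIFT == 12) this is
 * 100 << 8 == 25600 pages, and 25600 pages * 4 kB = 100 MB; since
 * write_bandwidth is tracked in pages per second, that is the 100 MB/s
 * of the comment above.
 */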

int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;

        bdi->bw_time_stamp = jiffies;
        bdi->written_stamp = 0;

        bdi->balanced_dirty_ratelimit = INIT_BW;
        bdi->dirty_ratelimit = INIT_BW;
        bdi->write_bandwidth = INIT_BW;
        bdi->avg_write_bandwidth = INIT_BW;

        err = fprop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                bdi_lock_two(&bdi->wb, dst);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&bdi->wb.list_lock);
                spin_unlock(&dst->list_lock);
        }

        bdi_unregister(bdi);

        /*
         * If bdi_unregister() had already been called earlier, the dwork
         * could still be pending because bdi_prune_sb() can race with the
         * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
         */
        cancel_delayed_work_sync(&bdi->wb.dwork);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        err = bdi_register(bdi, NULL, "%.28s-%ld", name,
                           atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
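
/*
 * A sketch of typical filesystem usage (names are illustrative, not from
 * this file): embed the bdi in the per-superblock info, register it at
 * mount time, and tear it down on unmount:
 *
 *      err = bdi_setup_and_register(&sbi->bdi, "examplefs", BDI_CAP_MAP_COPY);
 *      if (err)
 *              return err;
 *      sb->s_bdi = &sbi->bdi;
 *      ...
 *      bdi_destroy(&sbi->bdi);         (at unmount)
 *
 * The "%.28s-%ld" format plus bdi_seq gives each instance a unique name
 * under /sys/class/bdi/ even when several mounts share "examplefs".
 */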

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (!test_and_set_bit(bit, &bdi->state))
                atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);
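
/*
 * A common throttling pattern in reclaim and filesystem code is:
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * i.e. sleep for up to a tenth of a second, or until clear_bdi_congested()
 * wakes the queue because some bdi left async congestion, whichever comes
 * first.
 */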

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @zone has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep lasted the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
                        !zone_is_reclaim_congested(zone)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
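
/*
 * The main caller is direct reclaim (mm/vmscan.c), roughly:
 *
 *      wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 *
 * which only sleeps when the zone being reclaimed has itself seen
 * congested writeback recently, so reclaim of an uncongested zone is not
 * stalled by unrelated congested devices.
 */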

int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
                        table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}
