TOMOYO Linux Cross Reference
Linux/include/linux/backing-dev.h

  1 /*
  2  * include/linux/backing-dev.h
  3  *
  4  * low-level device information and state which is propagated up to
  5  * high-level code.
  6  */
  7 
  8 #ifndef _LINUX_BACKING_DEV_H
  9 #define _LINUX_BACKING_DEV_H
 10 
 11 #include <linux/kernel.h>
 12 #include <linux/fs.h>
 13 #include <linux/sched.h>
 14 #include <linux/blkdev.h>
 15 #include <linux/writeback.h>
 16 #include <linux/blk-cgroup.h>
 17 #include <linux/backing-dev-defs.h>
 18 #include <linux/slab.h>
 19 
 20 int __must_check bdi_init(struct backing_dev_info *bdi);
 21 void bdi_exit(struct backing_dev_info *bdi);
 22 
 23 __printf(3, 4)
 24 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 25                 const char *fmt, ...);
 26 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 27 void bdi_unregister(struct backing_dev_info *bdi);
 28 
 29 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 30 void bdi_destroy(struct backing_dev_info *bdi);
 31 
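/*
 * Illustrative sketch (not from the kernel tree): a driver that embeds a
 * backing_dev_info typically pairs bdi_setup_and_register() with
 * bdi_destroy() over the device's lifetime.  "struct example_device" and
 * the two helpers below are hypothetical names used only for this sketch.
 */
struct example_device {
        struct backing_dev_info bdi;
};

static inline int example_device_init(struct example_device *dev)
{
        /* initializes the bdi and registers it under a name based on "example" */
        return bdi_setup_and_register(&dev->bdi, "example");
}

static inline void example_device_exit(struct example_device *dev)
{
        /* must run before the embedding structure is freed */
        bdi_destroy(&dev->bdi);
}
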
 32 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 33                         bool range_cyclic, enum wb_reason reason);
 34 void wb_start_background_writeback(struct bdi_writeback *wb);
 35 void wb_workfn(struct work_struct *work);
 36 void wb_wakeup_delayed(struct bdi_writeback *wb);
 37 
 38 extern spinlock_t bdi_lock;
 39 extern struct list_head bdi_list;
 40 
 41 extern struct workqueue_struct *bdi_wq;
 42 
 43 static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
 44 {
 45         return test_bit(WB_has_dirty_io, &wb->state);
 46 }
 47 
 48 static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
 49 {
 50         /*
 51          * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
 52          * any dirty wbs.  See wb_update_write_bandwidth().
 53          */
 54         return atomic_long_read(&bdi->tot_write_bandwidth);
 55 }
 56 
 57 static inline void __add_wb_stat(struct bdi_writeback *wb,
 58                                  enum wb_stat_item item, s64 amount)
 59 {
 60         __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
 61 }
 62 
 63 static inline void __inc_wb_stat(struct bdi_writeback *wb,
 64                                  enum wb_stat_item item)
 65 {
 66         __add_wb_stat(wb, item, 1);
 67 }
 68 
 69 static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 70 {
 71         unsigned long flags;
 72 
 73         local_irq_save(flags);
 74         __inc_wb_stat(wb, item);
 75         local_irq_restore(flags);
 76 }
 77 
 78 static inline void __dec_wb_stat(struct bdi_writeback *wb,
 79                                  enum wb_stat_item item)
 80 {
 81         __add_wb_stat(wb, item, -1);
 82 }
 83 
 84 static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 85 {
 86         unsigned long flags;
 87 
 88         local_irq_save(flags);
 89         __dec_wb_stat(wb, item);
 90         local_irq_restore(flags);
 91 }
 92 
 93 static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 94 {
 95         return percpu_counter_read_positive(&wb->stat[item]);
 96 }
 97 
 98 static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
 99                                 enum wb_stat_item item)
100 {
101         return percpu_counter_sum_positive(&wb->stat[item]);
102 }
103 
104 static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
105 {
106         s64 sum;
107         unsigned long flags;
108 
109         local_irq_save(flags);
110         sum = __wb_stat_sum(wb, item);
111         local_irq_restore(flags);
112 
113         return sum;
114 }
115 
116 extern void wb_writeout_inc(struct bdi_writeback *wb);
117 
118 /*
119  * maximal error of a stat counter.
120  */
121 static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
122 {
123 #ifdef CONFIG_SMP
124         return nr_cpu_ids * WB_STAT_BATCH;
125 #else
126         return 1;
127 #endif
128 }
129 
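/*
 * Illustrative sketch of how the accessors above combine (modeled on the
 * dirty throttling code in mm/page-writeback.c): use the cheap, approximate
 * wb_stat() normally and fall back to the exact but more expensive
 * wb_stat_sum() only when operating within the counters' error bound.
 * WB_RECLAIMABLE and WB_WRITEBACK are wb_stat_item values from
 * backing-dev-defs.h; example_wb_dirty_pages() is a hypothetical helper.
 */
static inline unsigned long example_wb_dirty_pages(struct bdi_writeback *wb,
                                                   unsigned long thresh)
{
        if (thresh < 2 * wb_stat_error(wb))
                return wb_stat_sum(wb, WB_RECLAIMABLE) +
                       wb_stat_sum(wb, WB_WRITEBACK);

        return wb_stat(wb, WB_RECLAIMABLE) + wb_stat(wb, WB_WRITEBACK);
}
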
130 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
131 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
132 
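/*
 * Illustrative sketch: these two calls back the per-bdi min_ratio/max_ratio
 * sysfs knobs and let a driver cap how much of the global dirty limit a slow
 * or unreliable device may consume (FUSE, for instance, restricts its bdi in
 * a similar way).  The helper name is hypothetical.
 */
static inline int example_cap_bdi_dirty_share(struct backing_dev_info *bdi)
{
        /* never let this device account for more than 5% of dirty memory */
        return bdi_set_max_ratio(bdi, 5);
}
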
133 /*
134  * Flags in backing_dev_info::capability
135  *
136  * The first three flags control whether dirty pages will contribute to the
137  * VM's accounting and whether writepages() should be called for dirty pages
138  * (something that would not, for example, be appropriate for ramfs)
139  *
140  * WARNING: these flags are closely related and should not normally be
 141  * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 142  * three flags into a single convenience macro.
143  *
144  * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
145  * BDI_CAP_NO_WRITEBACK:   Don't write pages back
146  * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
147  * BDI_CAP_STRICTLIMIT:    Keep number of dirty pages below bdi threshold.
148  *
149  * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
150  */
151 #define BDI_CAP_NO_ACCT_DIRTY   0x00000001
152 #define BDI_CAP_NO_WRITEBACK    0x00000002
153 #define BDI_CAP_NO_ACCT_WB      0x00000004
154 #define BDI_CAP_STABLE_WRITES   0x00000008
155 #define BDI_CAP_STRICTLIMIT     0x00000010
156 #define BDI_CAP_CGROUP_WRITEBACK 0x00000020
157 
158 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
159         (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
160 
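/*
 * Illustrative sketch (hypothetical helpers): a RAM-backed filesystem whose
 * dirty pages can never be cleaned by writeback would use the combined flag
 * above, while a device that needs page contents to stay stable while under
 * I/O (checksumming, RAID parity) would add BDI_CAP_STABLE_WRITES.
 */
static inline void example_mark_ramfs_like(struct backing_dev_info *bdi)
{
        bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
}

static inline void example_require_stable_writes(struct backing_dev_info *bdi)
{
        bdi->capabilities |= BDI_CAP_STABLE_WRITES;
}
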
161 extern struct backing_dev_info noop_backing_dev_info;
162 
163 /**
164  * writeback_in_progress - determine whether there is writeback in progress
165  * @wb: bdi_writeback of interest
166  *
167  * Determine whether there is writeback waiting to be handled against a
168  * bdi_writeback.
169  */
170 static inline bool writeback_in_progress(struct bdi_writeback *wb)
171 {
172         return test_bit(WB_writeback_running, &wb->state);
173 }
174 
175 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
176 {
177         struct super_block *sb;
178 
179         if (!inode)
180                 return &noop_backing_dev_info;
181 
182         sb = inode->i_sb;
183 #ifdef CONFIG_BLOCK
184         if (sb_is_blkdev_sb(sb))
185                 return blk_get_backing_dev_info(I_BDEV(inode));
186 #endif
187         return sb->s_bdi;
188 }
189 
190 static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
191 {
192         struct backing_dev_info *bdi = wb->bdi;
193 
194         if (bdi->congested_fn)
195                 return bdi->congested_fn(bdi->congested_data, cong_bits);
196         return wb->congested->state & cong_bits;
197 }
198 
199 long congestion_wait(int sync, long timeout);
200 long wait_iff_congested(struct zone *zone, int sync, long timeout);
201 int pdflush_proc_obsolete(struct ctl_table *table, int write,
202                 void __user *buffer, size_t *lenp, loff_t *ppos);
203 
204 static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
205 {
206         return bdi->capabilities & BDI_CAP_STABLE_WRITES;
207 }
208 
209 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
210 {
211         return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
212 }
213 
214 static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
215 {
216         return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
217 }
218 
219 static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
220 {
221         /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
222         return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
223                                       BDI_CAP_NO_WRITEBACK));
224 }
225 
226 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
227 {
228         return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
229 }
230 
231 static inline bool mapping_cap_account_dirty(struct address_space *mapping)
232 {
233         return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
234 }
235 
236 static inline int bdi_sched_wait(void *word)
237 {
238         schedule();
239         return 0;
240 }
241 
242 #ifdef CONFIG_CGROUP_WRITEBACK
243 
244 struct bdi_writeback_congested *
245 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
246 void wb_congested_put(struct bdi_writeback_congested *congested);
247 struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
248                                     struct cgroup_subsys_state *memcg_css,
249                                     gfp_t gfp);
250 void wb_memcg_offline(struct mem_cgroup *memcg);
251 void wb_blkcg_offline(struct blkcg *blkcg);
252 int inode_congested(struct inode *inode, int cong_bits);
253 
254 /**
255  * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
256  * @inode: inode of interest
257  *
258  * cgroup writeback requires support from both the bdi and filesystem.
259  * Test whether @inode has both.
260  */
261 static inline bool inode_cgwb_enabled(struct inode *inode)
262 {
263         struct backing_dev_info *bdi = inode_to_bdi(inode);
264 
265         return bdi_cap_account_dirty(bdi) &&
266                 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
267                 (inode->i_sb->s_iflags & SB_I_CGROUPWB);
268 }
269 
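/*
 * Illustrative sketch of both halves of that opt-in (hypothetical helper):
 * the bdi advertises BDI_CAP_CGROUP_WRITEBACK and the filesystem marks its
 * super_block with SB_I_CGROUPWB; the block device superblock and
 * request-queue bdis, for example, do this.
 */
static inline void example_enable_cgroup_writeback(struct super_block *sb,
                                                   struct backing_dev_info *bdi)
{
        bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;  /* bdi side */
        sb->s_iflags |= SB_I_CGROUPWB;                  /* filesystem side */
}
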
270 /**
271  * wb_find_current - find wb for %current on a bdi
272  * @bdi: bdi of interest
273  *
274  * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 275  * Must be called under rcu_read_lock(), which protects the returned wb.
 276  * Returns NULL if not found.
277  */
278 static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
279 {
280         struct cgroup_subsys_state *memcg_css;
281         struct bdi_writeback *wb;
282 
283         memcg_css = task_css(current, memory_cgrp_id);
284         if (!memcg_css->parent)
285                 return &bdi->wb;
286 
287         wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
288 
289         /*
290          * %current's blkcg equals the effective blkcg of its memcg.  No
291          * need to use the relatively expensive cgroup_get_e_css().
292          */
293         if (likely(wb && wb->blkcg_css == task_css(current, blkio_cgrp_id)))
294                 return wb;
295         return NULL;
296 }
297 
298 /**
299  * wb_get_create_current - get or create wb for %current on a bdi
300  * @bdi: bdi of interest
301  * @gfp: allocation mask
302  *
303  * Equivalent to wb_get_create() on %current's memcg.  This function is
304  * called from a relatively hot path and optimizes the common cases using
305  * wb_find_current().
306  */
307 static inline struct bdi_writeback *
308 wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
309 {
310         struct bdi_writeback *wb;
311 
312         rcu_read_lock();
313         wb = wb_find_current(bdi);
314         if (wb && unlikely(!wb_tryget(wb)))
315                 wb = NULL;
316         rcu_read_unlock();
317 
318         if (unlikely(!wb)) {
319                 struct cgroup_subsys_state *memcg_css;
320 
321                 memcg_css = task_get_css(current, memory_cgrp_id);
322                 wb = wb_get_create(bdi, memcg_css, gfp);
323                 css_put(memcg_css);
324         }
325         return wb;
326 }
327 
328 /**
329  * inode_to_wb_is_valid - test whether an inode has a wb associated
330  * @inode: inode of interest
331  *
332  * Returns %true if @inode has a wb associated.  May be called without any
333  * locking.
334  */
335 static inline bool inode_to_wb_is_valid(struct inode *inode)
336 {
337         return inode->i_wb;
338 }
339 
340 /**
341  * inode_to_wb - determine the wb of an inode
342  * @inode: inode of interest
343  *
344  * Returns the wb @inode is currently associated with.  The caller must be
345  * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
346  * associated wb's list_lock.
347  */
348 static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
349 {
350 #ifdef CONFIG_LOCKDEP
351         WARN_ON_ONCE(debug_locks &&
352                      (!lockdep_is_held(&inode->i_lock) &&
353                       !lockdep_is_held(&inode->i_mapping->tree_lock) &&
354                       !lockdep_is_held(&inode->i_wb->list_lock)));
355 #endif
356         return inode->i_wb;
357 }
358 
359 /**
360  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
361  * @inode: target inode
 362  * @lockedp: temp bool output param, to be passed to unlocked_inode_to_wb_end()
363  *
364  * The caller wants to access the wb associated with @inode but isn't
365  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
366  * function determines the wb associated with @inode and ensures that the
367  * association doesn't change until the transaction is finished with
368  * unlocked_inode_to_wb_end().
369  *
 370  * The caller must call unlocked_inode_to_wb_end() with *@lockedp
 371  * afterwards and can't sleep during the transaction.  IRQ may or may not be
372  * disabled on return.
373  */
374 static inline struct bdi_writeback *
375 unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
376 {
377         rcu_read_lock();
378 
379         /*
380          * Paired with store_release in inode_switch_wb_work_fn() and
381          * ensures that we see the new wb if we see cleared I_WB_SWITCH.
382          */
383         *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
384 
385         if (unlikely(*lockedp))
386                 spin_lock_irq(&inode->i_mapping->tree_lock);
387 
388         /*
389          * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
 390  * inode_to_wb()'s lockdep check would complain, so deref i_wb directly.
391          */
392         return inode->i_wb;
393 }
394 
395 /**
396  * unlocked_inode_to_wb_end - end inode wb access transaction
397  * @inode: target inode
398  * @locked: *@lockedp from unlocked_inode_to_wb_begin()
399  */
400 static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
401 {
402         if (unlikely(locked))
403                 spin_unlock_irq(&inode->i_mapping->tree_lock);
404 
405         rcu_read_unlock();
406 }
407 
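/*
 * Illustrative sketch of the transaction pattern (the dirty-page accounting
 * code uses it this way); example_touch_wb() is a hypothetical helper:
 */
static inline void example_touch_wb(struct inode *inode)
{
        struct bdi_writeback *wb;
        bool locked;

        wb = unlocked_inode_to_wb_begin(inode, &locked);
        inc_wb_stat(wb, WB_DIRTIED);    /* short, non-sleeping work only */
        unlocked_inode_to_wb_end(inode, locked);
}
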
408 struct wb_iter {
409         int                     start_blkcg_id;
410         struct radix_tree_iter  tree_iter;
411         void                    **slot;
412 };
413 
414 static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
415                                                    struct backing_dev_info *bdi)
416 {
417         struct radix_tree_iter *titer = &iter->tree_iter;
418 
419         WARN_ON_ONCE(!rcu_read_lock_held());
420 
421         if (iter->start_blkcg_id >= 0) {
422                 iter->slot = radix_tree_iter_init(titer, iter->start_blkcg_id);
423                 iter->start_blkcg_id = -1;
424         } else {
425                 iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
426         }
427 
428         if (!iter->slot)
429                 iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
430         if (iter->slot)
431                 return *iter->slot;
432         return NULL;
433 }
434 
435 static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
436                                                    struct backing_dev_info *bdi,
437                                                    int start_blkcg_id)
438 {
439         iter->start_blkcg_id = start_blkcg_id;
440 
441         if (start_blkcg_id)
442                 return __wb_iter_next(iter, bdi);
443         else
444                 return &bdi->wb;
445 }
446 
447 /**
448  * bdi_for_each_wb - walk all wb's of a bdi in ascending blkcg ID order
449  * @wb_cur: cursor struct bdi_writeback pointer
450  * @bdi: bdi to walk wb's of
451  * @iter: pointer to struct wb_iter to be used as iteration buffer
452  * @start_blkcg_id: blkcg ID to start iteration from
453  *
454  * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
 455  * blkcg ID order starting from @start_blkcg_id.  @iter is a struct wb_iter
 456  * used as temporary storage during the iteration.  rcu_read_lock() must be
457  * held throughout iteration.
458  */
459 #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)              \
460         for ((wb_cur) = __wb_iter_init(iter, bdi, start_blkcg_id);      \
461              (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
462 
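/*
 * Illustrative sketch: walking every wb of a bdi.  As required above,
 * rcu_read_lock() is held across the whole walk; the helper name is
 * hypothetical.
 */
static inline long example_count_dirty_wbs(struct backing_dev_info *bdi)
{
        struct bdi_writeback *wb;
        struct wb_iter iter;
        long nr = 0;

        rcu_read_lock();
        bdi_for_each_wb(wb, bdi, &iter, 0)
                if (wb_has_dirty_io(wb))
                        nr++;
        rcu_read_unlock();

        return nr;
}
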
463 #else   /* CONFIG_CGROUP_WRITEBACK */
464 
465 static inline bool inode_cgwb_enabled(struct inode *inode)
466 {
467         return false;
468 }
469 
470 static inline struct bdi_writeback_congested *
471 wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
472 {
473         atomic_inc(&bdi->wb_congested->refcnt);
474         return bdi->wb_congested;
475 }
476 
477 static inline void wb_congested_put(struct bdi_writeback_congested *congested)
478 {
479         if (atomic_dec_and_test(&congested->refcnt))
480                 kfree(congested);
481 }
482 
483 static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
484 {
485         return &bdi->wb;
486 }
487 
488 static inline struct bdi_writeback *
489 wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
490 {
491         return &bdi->wb;
492 }
493 
494 static inline bool inode_to_wb_is_valid(struct inode *inode)
495 {
496         return true;
497 }
498 
499 static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
500 {
501         return &inode_to_bdi(inode)->wb;
502 }
503 
504 static inline struct bdi_writeback *
505 unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
506 {
507         return inode_to_wb(inode);
508 }
509 
510 static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
511 {
512 }
513 
514 static inline void wb_memcg_offline(struct mem_cgroup *memcg)
515 {
516 }
517 
518 static inline void wb_blkcg_offline(struct blkcg *blkcg)
519 {
520 }
521 
522 struct wb_iter {
523         int             next_id;
524 };
525 
526 #define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)              \
527         for ((iter)->next_id = (start_blkcg_id);                        \
528              ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
529 
530 static inline int inode_congested(struct inode *inode, int cong_bits)
531 {
532         return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
533 }
534 
535 #endif  /* CONFIG_CGROUP_WRITEBACK */
536 
537 static inline int inode_read_congested(struct inode *inode)
538 {
539         return inode_congested(inode, 1 << WB_sync_congested);
540 }
541 
542 static inline int inode_write_congested(struct inode *inode)
543 {
544         return inode_congested(inode, 1 << WB_async_congested);
545 }
546 
547 static inline int inode_rw_congested(struct inode *inode)
548 {
549         return inode_congested(inode, (1 << WB_sync_congested) |
550                                       (1 << WB_async_congested));
551 }
552 
553 static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
554 {
555         return wb_congested(&bdi->wb, cong_bits);
556 }
557 
558 static inline int bdi_read_congested(struct backing_dev_info *bdi)
559 {
560         return bdi_congested(bdi, 1 << WB_sync_congested);
561 }
562 
563 static inline int bdi_write_congested(struct backing_dev_info *bdi)
564 {
565         return bdi_congested(bdi, 1 << WB_async_congested);
566 }
567 
568 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
569 {
570         return bdi_congested(bdi, (1 << WB_sync_congested) |
571                                   (1 << WB_async_congested));
572 }
573 
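/*
 * Illustrative sketch: opportunistic writers commonly consult these
 * congestion helpers and back off instead of piling more I/O onto a
 * saturated device.  BLK_RW_ASYNC comes from the block layer headers;
 * example_throttle_writer() is a hypothetical helper.
 */
static inline void example_throttle_writer(struct backing_dev_info *bdi)
{
        /* wait up to 100ms for the async (write) queue to drain */
        if (bdi_write_congested(bdi))
                congestion_wait(BLK_RW_ASYNC, HZ / 10);
}
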
574 #endif  /* _LINUX_BACKING_DEV_H */
575 
