
TOMOYO Linux Cross Reference
Linux/block/blk-cgroup.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Common Block IO controller cgroup interface
  4  *
  5  * Based on ideas and code from CFQ, CFS and BFQ:
  6  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  7  *
  8  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  9  *                    Paolo Valente <paolo.valente@unimore.it>
 10  *
 11  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 12  *                    Nauman Rafique <nauman@google.com>
 13  *
 14  * For policy-specific per-blkcg data:
 15  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 16  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 17  */
 18 #include <linux/ioprio.h>
 19 #include <linux/kdev_t.h>
 20 #include <linux/module.h>
 21 #include <linux/sched/signal.h>
 22 #include <linux/err.h>
 23 #include <linux/blkdev.h>
 24 #include <linux/backing-dev.h>
 25 #include <linux/slab.h>
 26 #include <linux/genhd.h>
 27 #include <linux/delay.h>
 28 #include <linux/atomic.h>
 29 #include <linux/ctype.h>
 30 #include <linux/blk-cgroup.h>
 31 #include <linux/tracehook.h>
 32 #include "blk.h"
 33 
 34 #define MAX_KEY_LEN 100
 35 
 36 /*
 37  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 38  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 39  * policy [un]register operations including cgroup file additions /
 40  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 41  * allows grabbing it from cgroup callbacks.
 42  */
 43 static DEFINE_MUTEX(blkcg_pol_register_mutex);
 44 static DEFINE_MUTEX(blkcg_pol_mutex);
 45 
 46 struct blkcg blkcg_root;
 47 EXPORT_SYMBOL_GPL(blkcg_root);
 48 
 49 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 50 
 51 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 52 
 53 static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
 54 
 55 static bool blkcg_debug_stats = false;
 56 
 57 static bool blkcg_policy_enabled(struct request_queue *q,
 58                                  const struct blkcg_policy *pol)
 59 {
 60         return pol && test_bit(pol->plid, q->blkcg_pols);
 61 }
 62 
 63 /**
 64  * blkg_free - free a blkg
 65  * @blkg: blkg to free
 66  *
 67  * Free @blkg which may be partially allocated.
 68  */
 69 static void blkg_free(struct blkcg_gq *blkg)
 70 {
 71         int i;
 72 
 73         if (!blkg)
 74                 return;
 75 
 76         for (i = 0; i < BLKCG_MAX_POLS; i++)
 77                 if (blkg->pd[i])
 78                         blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 79 
 80         blkg_rwstat_exit(&blkg->stat_ios);
 81         blkg_rwstat_exit(&blkg->stat_bytes);
 82         kfree(blkg);
 83 }
 84 
 85 static void __blkg_release(struct rcu_head *rcu)
 86 {
 87         struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
 88 
 89         percpu_ref_exit(&blkg->refcnt);
 90 
 91         /* release the blkcg and parent blkg refs this blkg has been holding */
 92         css_put(&blkg->blkcg->css);
 93         if (blkg->parent)
 94                 blkg_put(blkg->parent);
 95 
 96         wb_congested_put(blkg->wb_congested);
 97 
 98         blkg_free(blkg);
 99 }
100 
101 /*
102  * A group is RCU protected, but holding the RCU lock does not mean that
103  * one can access all the fields of a blkg and assume they are valid.  For
104  * example, don't try to follow throtl_data and request queue links.
105  *
106  * Holding an RCU reference to a blkg only allows access to values local to
107  * the group, such as group stats and group rate limits.
108  */
109 static void blkg_release(struct percpu_ref *ref)
110 {
111         struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
112 
113         call_rcu(&blkg->rcu_head, __blkg_release);
114 }
115 
116 /**
117  * blkg_alloc - allocate a blkg
118  * @blkcg: block cgroup the new blkg is associated with
119  * @q: request_queue the new blkg is associated with
120  * @gfp_mask: allocation mask to use
121  *
122  * Allocate a new blkg associating @blkcg and @q.
123  */
124 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
125                                    gfp_t gfp_mask)
126 {
127         struct blkcg_gq *blkg;
128         int i;
129 
130         /* alloc and init base part */
131         blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
132         if (!blkg)
133                 return NULL;
134 
135         if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
136             blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
137                 goto err_free;
138 
139         blkg->q = q;
140         INIT_LIST_HEAD(&blkg->q_node);
141         blkg->blkcg = blkcg;
142 
143         for (i = 0; i < BLKCG_MAX_POLS; i++) {
144                 struct blkcg_policy *pol = blkcg_policy[i];
145                 struct blkg_policy_data *pd;
146 
147                 if (!blkcg_policy_enabled(q, pol))
148                         continue;
149 
150                 /* alloc per-policy data and attach it to blkg */
151                 pd = pol->pd_alloc_fn(gfp_mask, q->node);
152                 if (!pd)
153                         goto err_free;
154 
155                 blkg->pd[i] = pd;
156                 pd->blkg = blkg;
157                 pd->plid = i;
158         }
159 
160         return blkg;
161 
162 err_free:
163         blkg_free(blkg);
164         return NULL;
165 }
166 
167 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
168                                       struct request_queue *q, bool update_hint)
169 {
170         struct blkcg_gq *blkg;
171 
172         /*
173          * Hint didn't match.  Look up from the radix tree.  Note that the
174          * hint can only be updated under queue_lock as otherwise @blkg
175          * could have already been removed from blkg_tree.  The caller is
176          * responsible for grabbing queue_lock if @update_hint.
177          */
178         blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
179         if (blkg && blkg->q == q) {
180                 if (update_hint) {
181                         lockdep_assert_held(&q->queue_lock);
182                         rcu_assign_pointer(blkcg->blkg_hint, blkg);
183                 }
184                 return blkg;
185         }
186 
187         return NULL;
188 }
189 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
190 
191 /*
192  * If @new_blkg is %NULL, this function tries to allocate a new one as
193  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
194  */
195 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
196                                     struct request_queue *q,
197                                     struct blkcg_gq *new_blkg)
198 {
199         struct blkcg_gq *blkg;
200         struct bdi_writeback_congested *wb_congested;
201         int i, ret;
202 
203         WARN_ON_ONCE(!rcu_read_lock_held());
204         lockdep_assert_held(&q->queue_lock);
205 
206         /* request_queue is dying, do not create/recreate a blkg */
207         if (blk_queue_dying(q)) {
208                 ret = -ENODEV;
209                 goto err_free_blkg;
210         }
211 
212         /* blkg holds a reference to blkcg */
213         if (!css_tryget_online(&blkcg->css)) {
214                 ret = -ENODEV;
215                 goto err_free_blkg;
216         }
217 
218         wb_congested = wb_congested_get_create(q->backing_dev_info,
219                                                blkcg->css.id,
220                                                GFP_NOWAIT | __GFP_NOWARN);
221         if (!wb_congested) {
222                 ret = -ENOMEM;
223                 goto err_put_css;
224         }
225 
226         /* allocate */
227         if (!new_blkg) {
228                 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
229                 if (unlikely(!new_blkg)) {
230                         ret = -ENOMEM;
231                         goto err_put_congested;
232                 }
233         }
234         blkg = new_blkg;
235         blkg->wb_congested = wb_congested;
236 
237         /* link parent */
238         if (blkcg_parent(blkcg)) {
239                 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
240                 if (WARN_ON_ONCE(!blkg->parent)) {
241                         ret = -ENODEV;
242                         goto err_put_congested;
243                 }
244                 blkg_get(blkg->parent);
245         }
246 
247         ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
248                               GFP_NOWAIT | __GFP_NOWARN);
249         if (ret)
250                 goto err_cancel_ref;
251 
252         /* invoke per-policy init */
253         for (i = 0; i < BLKCG_MAX_POLS; i++) {
254                 struct blkcg_policy *pol = blkcg_policy[i];
255 
256                 if (blkg->pd[i] && pol->pd_init_fn)
257                         pol->pd_init_fn(blkg->pd[i]);
258         }
259 
260         /* insert */
261         spin_lock(&blkcg->lock);
262         ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
263         if (likely(!ret)) {
264                 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
265                 list_add(&blkg->q_node, &q->blkg_list);
266 
267                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
268                         struct blkcg_policy *pol = blkcg_policy[i];
269 
270                         if (blkg->pd[i] && pol->pd_online_fn)
271                                 pol->pd_online_fn(blkg->pd[i]);
272                 }
273         }
274         blkg->online = true;
275         spin_unlock(&blkcg->lock);
276 
277         if (!ret)
278                 return blkg;
279 
280         /* @blkg failed to be fully initialized, use the usual release path */
281         blkg_put(blkg);
282         return ERR_PTR(ret);
283 
284 err_cancel_ref:
285         percpu_ref_exit(&blkg->refcnt);
286 err_put_congested:
287         wb_congested_put(wb_congested);
288 err_put_css:
289         css_put(&blkcg->css);
290 err_free_blkg:
291         blkg_free(new_blkg);
292         return ERR_PTR(ret);
293 }
294 
295 /**
296  * __blkg_lookup_create - lookup blkg, try to create one if not there
297  * @blkcg: blkcg of interest
298  * @q: request_queue of interest
299  *
300  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
301  * create one.  blkg creation is performed recursively from blkcg_root such
302  * that all non-root blkg's have access to the parent blkg.  This function
303  * should be called under RCU read lock and @q->queue_lock.
304  *
305  * Returns the blkg or the closest blkg if blkg_create() fails as it walks
306  * down from root.
307  */
308 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
309                                       struct request_queue *q)
310 {
311         struct blkcg_gq *blkg;
312 
313         WARN_ON_ONCE(!rcu_read_lock_held());
314         lockdep_assert_held(&q->queue_lock);
315 
316         blkg = __blkg_lookup(blkcg, q, true);
317         if (blkg)
318                 return blkg;
319 
320         /*
321          * Create blkgs walking down from blkcg_root to @blkcg, so that all
322          * non-root blkgs have access to their parents.  Returns the closest
323          * blkg to the intended blkg should blkg_create() fail.
324          */
325         while (true) {
326                 struct blkcg *pos = blkcg;
327                 struct blkcg *parent = blkcg_parent(blkcg);
328                 struct blkcg_gq *ret_blkg = q->root_blkg;
329 
330                 while (parent) {
331                         blkg = __blkg_lookup(parent, q, false);
332                         if (blkg) {
333                                 /* remember closest blkg */
334                                 ret_blkg = blkg;
335                                 break;
336                         }
337                         pos = parent;
338                         parent = blkcg_parent(parent);
339                 }
340 
341                 blkg = blkg_create(pos, q, NULL);
342                 if (IS_ERR(blkg))
343                         return ret_blkg;
344                 if (pos == blkcg)
345                         return blkg;
346         }
347 }
348 
349 /**
350  * blkg_lookup_create - find or create a blkg
351  * @blkcg: target block cgroup
352  * @q: target request_queue
353  *
354  * This looks up or creates the blkg representing the unique pair
355  * of the blkcg and the request_queue.
356  */
357 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
358                                     struct request_queue *q)
359 {
360         struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
361 
362         if (unlikely(!blkg)) {
363                 unsigned long flags;
364 
365                 spin_lock_irqsave(&q->queue_lock, flags);
366                 blkg = __blkg_lookup_create(blkcg, q);
367                 spin_unlock_irqrestore(&q->queue_lock, flags);
368         }
369 
370         return blkg;
371 }
372 
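/*
 * Illustrative sketch, not part of the original file: how a caller might
 * resolve the blkg for a (blkcg, request_queue) pair.  blkg_lookup() is the
 * lockless fast path; blkg_lookup_create() above falls back to creation under
 * queue_lock.  The example_* name is made up; everything else is declared in
 * <linux/blk-cgroup.h>.  Note the result is only safe to use under the RCU
 * read lock unless a reference is taken on it.
 */
static void example_use_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup_create(blkcg, q);
	if (blkg && blkg->online)
		pr_debug("blkg for %s is online\n",
			 blkg_dev_name(blkg) ?: "(no dev)");
	rcu_read_unlock();
}
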
373 static void blkg_destroy(struct blkcg_gq *blkg)
374 {
375         struct blkcg *blkcg = blkg->blkcg;
376         struct blkcg_gq *parent = blkg->parent;
377         int i;
378 
379         lockdep_assert_held(&blkg->q->queue_lock);
380         lockdep_assert_held(&blkcg->lock);
381 
382         /* Something is wrong if we are trying to remove the same group twice */
383         WARN_ON_ONCE(list_empty(&blkg->q_node));
384         WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
385 
386         for (i = 0; i < BLKCG_MAX_POLS; i++) {
387                 struct blkcg_policy *pol = blkcg_policy[i];
388 
389                 if (blkg->pd[i] && pol->pd_offline_fn)
390                         pol->pd_offline_fn(blkg->pd[i]);
391         }
392 
393         if (parent) {
394                 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
395                 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
396         }
397 
398         blkg->online = false;
399 
400         radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
401         list_del_init(&blkg->q_node);
402         hlist_del_init_rcu(&blkg->blkcg_node);
403 
404         /*
405          * Both setting lookup hint to and clearing it from @blkg are done
406          * under queue_lock.  If it's not pointing to @blkg now, it never
407          * will.  Hint assignment itself can race safely.
408          */
409         if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
410                 rcu_assign_pointer(blkcg->blkg_hint, NULL);
411 
412         /*
413          * Put the reference taken at the time of creation so that when all
414          * queues are gone, group can be destroyed.
415          */
416         percpu_ref_kill(&blkg->refcnt);
417 }
418 
419 /**
420  * blkg_destroy_all - destroy all blkgs associated with a request_queue
421  * @q: request_queue of interest
422  *
423  * Destroy all blkgs associated with @q.
424  */
425 static void blkg_destroy_all(struct request_queue *q)
426 {
427         struct blkcg_gq *blkg, *n;
428 
429         spin_lock_irq(&q->queue_lock);
430         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
431                 struct blkcg *blkcg = blkg->blkcg;
432 
433                 spin_lock(&blkcg->lock);
434                 blkg_destroy(blkg);
435                 spin_unlock(&blkcg->lock);
436         }
437 
438         q->root_blkg = NULL;
439         spin_unlock_irq(&q->queue_lock);
440 }
441 
442 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
443                              struct cftype *cftype, u64 val)
444 {
445         struct blkcg *blkcg = css_to_blkcg(css);
446         struct blkcg_gq *blkg;
447         int i;
448 
449         mutex_lock(&blkcg_pol_mutex);
450         spin_lock_irq(&blkcg->lock);
451 
452         /*
453          * Note that stat reset is racy - it doesn't synchronize against
454          * stat updates.  This is a debug feature which shouldn't exist
455          * anyway.  If you get hit by a race, retry.
456          */
457         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
458                 blkg_rwstat_reset(&blkg->stat_bytes);
459                 blkg_rwstat_reset(&blkg->stat_ios);
460 
461                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
462                         struct blkcg_policy *pol = blkcg_policy[i];
463 
464                         if (blkg->pd[i] && pol->pd_reset_stats_fn)
465                                 pol->pd_reset_stats_fn(blkg->pd[i]);
466                 }
467         }
468 
469         spin_unlock_irq(&blkcg->lock);
470         mutex_unlock(&blkcg_pol_mutex);
471         return 0;
472 }
473 
474 const char *blkg_dev_name(struct blkcg_gq *blkg)
475 {
476         /* some drivers (floppy) instantiate a queue w/o disk registered */
477         if (blkg->q->backing_dev_info->dev)
478                 return dev_name(blkg->q->backing_dev_info->dev);
479         return NULL;
480 }
481 
482 /**
483  * blkcg_print_blkgs - helper for printing per-blkg data
484  * @sf: seq_file to print to
485  * @blkcg: blkcg of interest
486  * @prfill: fill function to print out a blkg
487  * @pol: policy in question
488  * @data: data to be passed to @prfill
489  * @show_total: to print out sum of prfill return values or not
490  *
491  * This function invokes @prfill on each blkg of @blkcg if pd for the
492  * policy specified by @pol exists.  @prfill is invoked with @sf, the
493  * policy data and @data and the matching queue lock held.  If @show_total
494  * is %true, the sum of the return values from @prfill is printed with
495  * "Total" label at the end.
496  *
497  * This is to be used to construct print functions for the
498  * cftype->seq_show method.
499  */
500 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
501                        u64 (*prfill)(struct seq_file *,
502                                      struct blkg_policy_data *, int),
503                        const struct blkcg_policy *pol, int data,
504                        bool show_total)
505 {
506         struct blkcg_gq *blkg;
507         u64 total = 0;
508 
509         rcu_read_lock();
510         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
511                 spin_lock_irq(&blkg->q->queue_lock);
512                 if (blkcg_policy_enabled(blkg->q, pol))
513                         total += prfill(sf, blkg->pd[pol->plid], data);
514                 spin_unlock_irq(&blkg->q->queue_lock);
515         }
516         rcu_read_unlock();
517 
518         if (show_total)
519                 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
520 }
521 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
522 
523 /**
524  * __blkg_prfill_u64 - prfill helper for a single u64 value
525  * @sf: seq_file to print to
526  * @pd: policy private data of interest
527  * @v: value to print
528  *
529  * Print @v to @sf for the device associated with @pd.
530  */
531 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
532 {
533         const char *dname = blkg_dev_name(pd->blkg);
534 
535         if (!dname)
536                 return 0;
537 
538         seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
539         return v;
540 }
541 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
542 
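/*
 * Illustrative sketch, not part of the original file: building a cftype
 * seq_show handler for a policy out of blkcg_print_blkgs() and
 * __blkg_prfill_u64() above, as the kernel-doc for blkcg_print_blkgs()
 * suggests.  struct foo_grp, blkcg_policy_foo and the foo_* helpers are
 * made-up names standing in for a real policy's per-blkg data and callbacks.
 */
static struct blkcg_policy blkcg_policy_foo;

struct foo_grp {
	struct blkg_policy_data pd;	/* per-blkg policy data embedded in the policy's own struct */
	u64 limit;			/* example per-device setting */
};

static u64 foo_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	struct foo_grp *fg = container_of(pd, struct foo_grp, pd);

	/* emit one "<dev> <limit>" line per device, skipping unset limits */
	return fg->limit ? __blkg_prfill_u64(sf, pd, fg->limit) : 0;
}

static int foo_print_limit(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), foo_prfill_limit,
			  &blkcg_policy_foo, 0, false);
	return 0;
}
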
543 /**
544  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
545  * @sf: seq_file to print to
546  * @pd: policy private data of interest
547  * @rwstat: rwstat to print
548  *
549  * Print @rwstat to @sf for the device associated with @pd.
550  */
551 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
552                          const struct blkg_rwstat *rwstat)
553 {
554         static const char *rwstr[] = {
555                 [BLKG_RWSTAT_READ]      = "Read",
556                 [BLKG_RWSTAT_WRITE]     = "Write",
557                 [BLKG_RWSTAT_SYNC]      = "Sync",
558                 [BLKG_RWSTAT_ASYNC]     = "Async",
559                 [BLKG_RWSTAT_DISCARD]   = "Discard",
560         };
561         const char *dname = blkg_dev_name(pd->blkg);
562         u64 v;
563         int i;
564 
565         if (!dname)
566                 return 0;
567 
568         for (i = 0; i < BLKG_RWSTAT_NR; i++)
569                 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
570                            (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
571 
572         v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
573                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
574                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
575         seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
576         return v;
577 }
578 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
579 
580 /**
581  * blkg_prfill_stat - prfill callback for blkg_stat
582  * @sf: seq_file to print to
583  * @pd: policy private data of interest
584  * @off: offset to the blkg_stat in @pd
585  *
586  * prfill callback for printing a blkg_stat.
587  */
588 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
589 {
590         return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
591 }
592 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
593 
594 /**
595  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
596  * @sf: seq_file to print to
597  * @pd: policy private data of interest
598  * @off: offset to the blkg_rwstat in @pd
599  *
600  * prfill callback for printing a blkg_rwstat.
601  */
602 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
603                        int off)
604 {
605         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
606 
607         return __blkg_prfill_rwstat(sf, pd, &rwstat);
608 }
609 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
610 
611 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
612                                     struct blkg_policy_data *pd, int off)
613 {
614         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
615 
616         return __blkg_prfill_rwstat(sf, pd, &rwstat);
617 }
618 
619 /**
620  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
621  * @sf: seq_file to print to
622  * @v: unused
623  *
624  * To be used as cftype->seq_show to print blkg->stat_bytes.
625  * cftype->private must be set to the blkcg_policy.
626  */
627 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
628 {
629         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
630                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
631                           offsetof(struct blkcg_gq, stat_bytes), true);
632         return 0;
633 }
634 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
635 
636 /**
637  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
638  * @sf: seq_file to print to
639  * @v: unused
640  *
641  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
642  * must be set to the blkcg_policy.
643  */
644 int blkg_print_stat_ios(struct seq_file *sf, void *v)
645 {
646         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
647                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
648                           offsetof(struct blkcg_gq, stat_ios), true);
649         return 0;
650 }
651 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
652 
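/*
 * Illustrative sketch, not part of the original file: wiring the two seq_show
 * helpers above into a policy's cftype table.  As noted in their kernel-doc,
 * ->private must carry the blkcg_policy pointer.  blkcg_policy_foo and the
 * file names are made up; real policies (e.g. blk-throttle) use the same
 * pattern.
 */
static struct cftype foo_legacy_files[] = {
	{
		.name = "foo.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_foo,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "foo.io_serviced",
		.private = (unsigned long)&blkcg_policy_foo,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
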
653 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
654                                               struct blkg_policy_data *pd,
655                                               int off)
656 {
657         struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
658                                                               NULL, off);
659         return __blkg_prfill_rwstat(sf, pd, &rwstat);
660 }
661 
662 /**
663  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
664  * @sf: seq_file to print to
665  * @v: unused
666  */
667 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
668 {
669         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
670                           blkg_prfill_rwstat_field_recursive,
671                           (void *)seq_cft(sf)->private,
672                           offsetof(struct blkcg_gq, stat_bytes), true);
673         return 0;
674 }
675 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
676 
677 /**
678  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
679  * @sf: seq_file to print to
680  * @v: unused
681  */
682 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
683 {
684         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
685                           blkg_prfill_rwstat_field_recursive,
686                           (void *)seq_cft(sf)->private,
687                           offsetof(struct blkcg_gq, stat_ios), true);
688         return 0;
689 }
690 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
691 
692 /**
693  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
694  * @blkg: blkg of interest
695  * @pol: blkcg_policy which contains the blkg_stat
696  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
697  *
698  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
699  * online descendants and their aux counts.  The caller must be holding the
700  * queue lock for online tests.
701  *
702  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
703  * at @off bytes into @blkg's blkg_policy_data of the policy.
704  */
705 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
706                             struct blkcg_policy *pol, int off)
707 {
708         struct blkcg_gq *pos_blkg;
709         struct cgroup_subsys_state *pos_css;
710         u64 sum = 0;
711 
712         lockdep_assert_held(&blkg->q->queue_lock);
713 
714         rcu_read_lock();
715         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
716                 struct blkg_stat *stat;
717 
718                 if (!pos_blkg->online)
719                         continue;
720 
721                 if (pol)
722                         stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
723                 else
724                         stat = (void *)blkg + off;
725 
726                 sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
727         }
728         rcu_read_unlock();
729 
730         return sum;
731 }
732 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
733 
734 /**
735  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
736  * @blkg: blkg of interest
737  * @pol: blkcg_policy which contains the blkg_rwstat
738  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
739  *
740  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
741  * online descendants and their aux counts.  The caller must be holding the
742  * queue lock for online tests.
743  *
744  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
745  * is at @off bytes into @blkg's blkg_policy_data of the policy.
746  */
747 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
748                                              struct blkcg_policy *pol, int off)
749 {
750         struct blkcg_gq *pos_blkg;
751         struct cgroup_subsys_state *pos_css;
752         struct blkg_rwstat sum = { };
753         int i;
754 
755         lockdep_assert_held(&blkg->q->queue_lock);
756 
757         rcu_read_lock();
758         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
759                 struct blkg_rwstat *rwstat;
760 
761                 if (!pos_blkg->online)
762                         continue;
763 
764                 if (pol)
765                         rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
766                 else
767                         rwstat = (void *)pos_blkg + off;
768 
769                 for (i = 0; i < BLKG_RWSTAT_NR; i++)
770                         atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
771                                 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
772                                 &sum.aux_cnt[i]);
773         }
774         rcu_read_unlock();
775 
776         return sum;
777 }
778 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
779 
780 /* Performs queue bypass and policy enabled checks then looks up blkg. */
781 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
782                                           const struct blkcg_policy *pol,
783                                           struct request_queue *q)
784 {
785         WARN_ON_ONCE(!rcu_read_lock_held());
786         lockdep_assert_held(&q->queue_lock);
787 
788         if (!blkcg_policy_enabled(q, pol))
789                 return ERR_PTR(-EOPNOTSUPP);
790         return __blkg_lookup(blkcg, q, true /* update_hint */);
791 }
792 
793 /**
794  * blkg_conf_prep - parse and prepare for per-blkg config update
795  * @blkcg: target block cgroup
796  * @pol: target policy
797  * @input: input string
798  * @ctx: blkg_conf_ctx to be filled
799  *
800  * Parse per-blkg config update from @input and initialize @ctx with the
801  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
802  * part of @input following MAJ:MIN.  This function returns with RCU read
803  * lock and queue lock held and must be paired with blkg_conf_finish().
804  */
805 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
806                    char *input, struct blkg_conf_ctx *ctx)
807         __acquires(rcu) __acquires(&disk->queue->queue_lock)
808 {
809         struct gendisk *disk;
810         struct request_queue *q;
811         struct blkcg_gq *blkg;
812         unsigned int major, minor;
813         int key_len, part, ret;
814         char *body;
815 
816         if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
817                 return -EINVAL;
818 
819         body = input + key_len;
820         if (!isspace(*body))
821                 return -EINVAL;
822         body = skip_spaces(body);
823 
824         disk = get_gendisk(MKDEV(major, minor), &part);
825         if (!disk)
826                 return -ENODEV;
827         if (part) {
828                 ret = -ENODEV;
829                 goto fail;
830         }
831 
832         q = disk->queue;
833 
834         rcu_read_lock();
835         spin_lock_irq(&q->queue_lock);
836 
837         blkg = blkg_lookup_check(blkcg, pol, q);
838         if (IS_ERR(blkg)) {
839                 ret = PTR_ERR(blkg);
840                 goto fail_unlock;
841         }
842 
843         if (blkg)
844                 goto success;
845 
846         /*
847          * Create blkgs walking down from blkcg_root to @blkcg, so that all
848          * non-root blkgs have access to their parents.
849          */
850         while (true) {
851                 struct blkcg *pos = blkcg;
852                 struct blkcg *parent;
853                 struct blkcg_gq *new_blkg;
854 
855                 parent = blkcg_parent(blkcg);
856                 while (parent && !__blkg_lookup(parent, q, false)) {
857                         pos = parent;
858                         parent = blkcg_parent(parent);
859                 }
860 
861                 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
862                 spin_unlock_irq(&q->queue_lock);
863                 rcu_read_unlock();
864 
865                 new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
866                 if (unlikely(!new_blkg)) {
867                         ret = -ENOMEM;
868                         goto fail;
869                 }
870 
871                 rcu_read_lock();
872                 spin_lock_irq(&q->queue_lock);
873 
874                 blkg = blkg_lookup_check(pos, pol, q);
875                 if (IS_ERR(blkg)) {
876                         ret = PTR_ERR(blkg);
877                         goto fail_unlock;
878                 }
879 
880                 if (blkg) {
881                         blkg_free(new_blkg);
882                 } else {
883                         blkg = blkg_create(pos, q, new_blkg);
884                         if (IS_ERR(blkg)) {
885                                 ret = PTR_ERR(blkg);
886                                 goto fail_unlock;
887                         }
888                 }
889 
890                 if (pos == blkcg)
891                         goto success;
892         }
893 success:
894         ctx->disk = disk;
895         ctx->blkg = blkg;
896         ctx->body = body;
897         return 0;
898 
899 fail_unlock:
900         spin_unlock_irq(&q->queue_lock);
901         rcu_read_unlock();
902 fail:
903         put_disk_and_module(disk);
904         /*
905          * If queue was bypassing, we should retry.  Do so after a
906          * short msleep().  It isn't strictly necessary but queue
907          * can be bypassing for some time and it's always nice to
908          * avoid busy looping.
909          */
910         if (ret == -EBUSY) {
911                 msleep(10);
912                 ret = restart_syscall();
913         }
914         return ret;
915 }
916 
917 /**
918  * blkg_conf_finish - finish up per-blkg config update
919  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
920  *
921  * Finish up after per-blkg config update.  This function must be paired
922  * with blkg_conf_prep().
923  */
924 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
925         __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
926 {
927         spin_unlock_irq(&ctx->disk->queue->queue_lock);
928         rcu_read_unlock();
929         put_disk_and_module(ctx->disk);
930 }
931 
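/*
 * Illustrative sketch, not part of the original file: a policy's cftype write
 * handler built around the blkg_conf_prep()/blkg_conf_finish() pair above.
 * blkcg_policy_foo and struct foo_grp are the made-up names from the earlier
 * sketches; the overall shape mirrors what real policies do for
 * "MAJ:MIN <value>" style configuration files.
 */
static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct foo_grp *fg;
	u64 v;
	int ret;

	/* parses "MAJ:MIN"; returns with RCU and queue_lock held on success */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out_finish;

	fg = container_of(blkg_to_pd(ctx.blkg, &blkcg_policy_foo),
			  struct foo_grp, pd);
	fg->limit = v;
	ret = 0;

out_finish:
	blkg_conf_finish(&ctx);	/* drops queue_lock and RCU, puts the disk */
	return ret ?: nbytes;
}
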
932 static int blkcg_print_stat(struct seq_file *sf, void *v)
933 {
934         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
935         struct blkcg_gq *blkg;
936 
937         rcu_read_lock();
938 
939         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
940                 const char *dname;
941                 char *buf;
942                 struct blkg_rwstat rwstat;
943                 u64 rbytes, wbytes, rios, wios, dbytes, dios;
944                 size_t size = seq_get_buf(sf, &buf), off = 0;
945                 int i;
946                 bool has_stats = false;
947 
948                 dname = blkg_dev_name(blkg);
949                 if (!dname)
950                         continue;
951 
952                 /*
953                  * Hooray string manipulation, count is the size written NOT
954                  * INCLUDING THE \0, so size is now count+1 less than what we
955                  * had before, but we want to start writing the next bit from
956                  * the \0 so we only add count to buf.
957                  */
958                 off += scnprintf(buf+off, size-off, "%s ", dname);
959 
960                 spin_lock_irq(&blkg->q->queue_lock);
961 
962                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
963                                         offsetof(struct blkcg_gq, stat_bytes));
964                 rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
965                 wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
966                 dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
967 
968                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
969                                         offsetof(struct blkcg_gq, stat_ios));
970                 rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
971                 wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
972                 dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
973 
974                 spin_unlock_irq(&blkg->q->queue_lock);
975 
976                 if (rbytes || wbytes || rios || wios) {
977                         has_stats = true;
978                         off += scnprintf(buf+off, size-off,
979                                          "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
980                                          rbytes, wbytes, rios, wios,
981                                          dbytes, dios);
982                 }
983 
984                 if (!blkcg_debug_stats)
985                         goto next;
986 
987                 if (atomic_read(&blkg->use_delay)) {
988                         has_stats = true;
989                         off += scnprintf(buf+off, size-off,
990                                          " use_delay=%d delay_nsec=%llu",
991                                          atomic_read(&blkg->use_delay),
992                                         (unsigned long long)atomic64_read(&blkg->delay_nsec));
993                 }
994 
995                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
996                         struct blkcg_policy *pol = blkcg_policy[i];
997                         size_t written;
998 
999                         if (!blkg->pd[i] || !pol->pd_stat_fn)
1000                                 continue;
1001 
1002                         written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
1003                         if (written)
1004                                 has_stats = true;
1005                         off += written;
1006                 }
1007 next:
1008                 if (has_stats) {
1009                         if (off < size - 1) {
1010                                 off += scnprintf(buf+off, size-off, "\n");
1011                                 seq_commit(sf, off);
1012                         } else {
1013                                 seq_commit(sf, -1);
1014                         }
1015                 }
1016         }
1017 
1018         rcu_read_unlock();
1019         return 0;
1020 }
1021 
1022 static struct cftype blkcg_files[] = {
1023         {
1024                 .name = "stat",
1025                 .flags = CFTYPE_NOT_ON_ROOT,
1026                 .seq_show = blkcg_print_stat,
1027         },
1028         { }     /* terminate */
1029 };
1030 
1031 static struct cftype blkcg_legacy_files[] = {
1032         {
1033                 .name = "reset_stats",
1034                 .write_u64 = blkcg_reset_stats,
1035         },
1036         { }     /* terminate */
1037 };
1038 
1039 /*
1040  * blkcg destruction is a three-stage process.
1041  *
1042  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1043  *    which offlines writeback.  Here we tie the next stage of blkg destruction
1044  *    to the completion of writeback associated with the blkcg.  This lets us
1045  *    avoid punting potentially large amounts of outstanding writeback to root
1046  *    while maintaining any ongoing policies.  The next stage is triggered when
1047  *    the nr_cgwbs count goes to zero.
1048  *
1049  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1050  *    and handles the destruction of blkgs.  Here the css reference held by
1051  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1052  *    This work may occur in cgwb_release_workfn() on the cgwb_release
1053  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1054  *    punted to the root_blkg.
1055  *
1056  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1057  *    This finally frees the blkcg.
1058  */
1059 
1060 /**
1061  * blkcg_css_offline - cgroup css_offline callback
1062  * @css: css of interest
1063  *
1064  * This function is called when @css is about to go away.  Here the cgwbs are
1065  * offlined first and only once writeback associated with the blkcg has
1066  * finished do we start step 2 (see above).
1067  */
1068 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1069 {
1070         struct blkcg *blkcg = css_to_blkcg(css);
1071 
1072         /* this prevents anyone from attaching or migrating to this blkcg */
1073         wb_blkcg_offline(blkcg);
1074 
1075         /* put the base cgwb reference allowing step 2 to be triggered */
1076         blkcg_cgwb_put(blkcg);
1077 }
1078 
1079 /**
1080  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1081  * @blkcg: blkcg of interest
1082  *
1083  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1084  * is nested inside q lock, this function performs reverse double lock dancing.
1085  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1086  * blkcg_css_free to eventually be called.
1087  *
1088  * This is the blkcg counterpart of ioc_release_fn().
1089  */
1090 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1091 {
1092         spin_lock_irq(&blkcg->lock);
1093 
1094         while (!hlist_empty(&blkcg->blkg_list)) {
1095                 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1096                                                 struct blkcg_gq, blkcg_node);
1097                 struct request_queue *q = blkg->q;
1098 
1099                 if (spin_trylock(&q->queue_lock)) {
1100                         blkg_destroy(blkg);
1101                         spin_unlock(&q->queue_lock);
1102                 } else {
1103                         spin_unlock_irq(&blkcg->lock);
1104                         cpu_relax();
1105                         spin_lock_irq(&blkcg->lock);
1106                 }
1107         }
1108 
1109         spin_unlock_irq(&blkcg->lock);
1110 }
1111 
1112 static void blkcg_css_free(struct cgroup_subsys_state *css)
1113 {
1114         struct blkcg *blkcg = css_to_blkcg(css);
1115         int i;
1116 
1117         mutex_lock(&blkcg_pol_mutex);
1118 
1119         list_del(&blkcg->all_blkcgs_node);
1120 
1121         for (i = 0; i < BLKCG_MAX_POLS; i++)
1122                 if (blkcg->cpd[i])
1123                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1124 
1125         mutex_unlock(&blkcg_pol_mutex);
1126 
1127         kfree(blkcg);
1128 }
1129 
1130 static struct cgroup_subsys_state *
1131 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1132 {
1133         struct blkcg *blkcg;
1134         struct cgroup_subsys_state *ret;
1135         int i;
1136 
1137         mutex_lock(&blkcg_pol_mutex);
1138 
1139         if (!parent_css) {
1140                 blkcg = &blkcg_root;
1141         } else {
1142                 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1143                 if (!blkcg) {
1144                         ret = ERR_PTR(-ENOMEM);
1145                         goto unlock;
1146                 }
1147         }
1148 
1149         for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1150                 struct blkcg_policy *pol = blkcg_policy[i];
1151                 struct blkcg_policy_data *cpd;
1152 
1153                 /*
1154                  * If the policy hasn't been attached yet, wait for it
1155                  * to be attached before doing anything else. Otherwise,
1156                  * check if the policy requires any specific per-cgroup
1157                  * data: if it does, allocate and initialize it.
1158                  */
1159                 if (!pol || !pol->cpd_alloc_fn)
1160                         continue;
1161 
1162                 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1163                 if (!cpd) {
1164                         ret = ERR_PTR(-ENOMEM);
1165                         goto free_pd_blkcg;
1166                 }
1167                 blkcg->cpd[i] = cpd;
1168                 cpd->blkcg = blkcg;
1169                 cpd->plid = i;
1170                 if (pol->cpd_init_fn)
1171                         pol->cpd_init_fn(cpd);
1172         }
1173 
1174         spin_lock_init(&blkcg->lock);
1175         INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1176         INIT_HLIST_HEAD(&blkcg->blkg_list);
1177 #ifdef CONFIG_CGROUP_WRITEBACK
1178         INIT_LIST_HEAD(&blkcg->cgwb_list);
1179         refcount_set(&blkcg->cgwb_refcnt, 1);
1180 #endif
1181         list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1182 
1183         mutex_unlock(&blkcg_pol_mutex);
1184         return &blkcg->css;
1185 
1186 free_pd_blkcg:
1187         for (i--; i >= 0; i--)
1188                 if (blkcg->cpd[i])
1189                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1190 
1191         if (blkcg != &blkcg_root)
1192                 kfree(blkcg);
1193 unlock:
1194         mutex_unlock(&blkcg_pol_mutex);
1195         return ret;
1196 }
1197 
1198 /**
1199  * blkcg_init_queue - initialize blkcg part of request queue
1200  * @q: request_queue to initialize
1201  *
1202  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1203  * part of new request_queue @q.
1204  *
1205  * RETURNS:
1206  * 0 on success, -errno on failure.
1207  */
1208 int blkcg_init_queue(struct request_queue *q)
1209 {
1210         struct blkcg_gq *new_blkg, *blkg;
1211         bool preloaded;
1212         int ret;
1213 
1214         new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1215         if (!new_blkg)
1216                 return -ENOMEM;
1217 
1218         preloaded = !radix_tree_preload(GFP_KERNEL);
1219 
1220         /* Make sure the root blkg exists. */
1221         rcu_read_lock();
1222         spin_lock_irq(&q->queue_lock);
1223         blkg = blkg_create(&blkcg_root, q, new_blkg);
1224         if (IS_ERR(blkg))
1225                 goto err_unlock;
1226         q->root_blkg = blkg;
1227         spin_unlock_irq(&q->queue_lock);
1228         rcu_read_unlock();
1229 
1230         if (preloaded)
1231                 radix_tree_preload_end();
1232 
1233         ret = blk_iolatency_init(q);
1234         if (ret)
1235                 goto err_destroy_all;
1236 
1237         ret = blk_throtl_init(q);
1238         if (ret)
1239                 goto err_destroy_all;
1240         return 0;
1241 
1242 err_destroy_all:
1243         blkg_destroy_all(q);
1244         return ret;
1245 err_unlock:
1246         spin_unlock_irq(&q->queue_lock);
1247         rcu_read_unlock();
1248         if (preloaded)
1249                 radix_tree_preload_end();
1250         return PTR_ERR(blkg);
1251 }
1252 
1253 /**
1254  * blkcg_drain_queue - drain blkcg part of request_queue
1255  * @q: request_queue to drain
1256  *
1257  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1258  */
1259 void blkcg_drain_queue(struct request_queue *q)
1260 {
1261         lockdep_assert_held(&q->queue_lock);
1262 
1263         /*
1264          * @q could be exiting and already have destroyed all blkgs as
1265          * indicated by NULL root_blkg.  If so, don't confuse policies.
1266          */
1267         if (!q->root_blkg)
1268                 return;
1269 
1270         blk_throtl_drain(q);
1271 }
1272 
1273 /**
1274  * blkcg_exit_queue - exit and release blkcg part of request_queue
1275  * @q: request_queue being released
1276  *
1277  * Called from blk_exit_queue().  Responsible for exiting blkcg part.
1278  */
1279 void blkcg_exit_queue(struct request_queue *q)
1280 {
1281         blkg_destroy_all(q);
1282         blk_throtl_exit(q);
1283 }
1284 
1285 /*
1286  * We cannot support shared io contexts, as we have no means to support
1287  * two tasks with the same ioc in two different groups without major rework
1288  * of the main cic data structures.  For now we allow a task to change
1289  * its cgroup only if it's the only owner of its ioc.
1290  */
1291 static int blkcg_can_attach(struct cgroup_taskset *tset)
1292 {
1293         struct task_struct *task;
1294         struct cgroup_subsys_state *dst_css;
1295         struct io_context *ioc;
1296         int ret = 0;
1297 
1298         /* task_lock() is needed to avoid races with exit_io_context() */
1299         cgroup_taskset_for_each(task, dst_css, tset) {
1300                 task_lock(task);
1301                 ioc = task->io_context;
1302                 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1303                         ret = -EINVAL;
1304                 task_unlock(task);
1305                 if (ret)
1306                         break;
1307         }
1308         return ret;
1309 }
1310 
1311 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1312 {
1313         int i;
1314 
1315         mutex_lock(&blkcg_pol_mutex);
1316 
1317         for (i = 0; i < BLKCG_MAX_POLS; i++) {
1318                 struct blkcg_policy *pol = blkcg_policy[i];
1319                 struct blkcg *blkcg;
1320 
1321                 if (!pol || !pol->cpd_bind_fn)
1322                         continue;
1323 
1324                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1325                         if (blkcg->cpd[pol->plid])
1326                                 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1327         }
1328         mutex_unlock(&blkcg_pol_mutex);
1329 }
1330 
1331 static void blkcg_exit(struct task_struct *tsk)
1332 {
1333         if (tsk->throttle_queue)
1334                 blk_put_queue(tsk->throttle_queue);
1335         tsk->throttle_queue = NULL;
1336 }
1337 
1338 struct cgroup_subsys io_cgrp_subsys = {
1339         .css_alloc = blkcg_css_alloc,
1340         .css_offline = blkcg_css_offline,
1341         .css_free = blkcg_css_free,
1342         .can_attach = blkcg_can_attach,
1343         .bind = blkcg_bind,
1344         .dfl_cftypes = blkcg_files,
1345         .legacy_cftypes = blkcg_legacy_files,
1346         .legacy_name = "blkio",
1347         .exit = blkcg_exit,
1348 #ifdef CONFIG_MEMCG
1349         /*
1350          * This ensures that, if available, memcg is automatically enabled
1351          * together on the default hierarchy so that the owner cgroup can
1352          * be retrieved from writeback pages.
1353          */
1354         .depends_on = 1 << memory_cgrp_id,
1355 #endif
1356 };
1357 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1358 
1359 /**
1360  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1361  * @q: request_queue of interest
1362  * @pol: blkcg policy to activate
1363  *
1364  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1365  * bypass mode to populate its blkgs with policy_data for @pol.
1366  *
1367  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1368  * from IO path.  Update of each blkg is protected by both queue and blkcg
1369  * locks so that holding either lock and testing blkcg_policy_enabled() is
1370  * always enough for dereferencing policy data.
1371  *
1372  * The caller is responsible for synchronizing [de]activations and policy
1373  * [un]registerations.  Returns 0 on success, -errno on failure.
1374  */
1375 int blkcg_activate_policy(struct request_queue *q,
1376                           const struct blkcg_policy *pol)
1377 {
1378         struct blkg_policy_data *pd_prealloc = NULL;
1379         struct blkcg_gq *blkg;
1380         int ret;
1381 
1382         if (blkcg_policy_enabled(q, pol))
1383                 return 0;
1384 
1385         if (queue_is_mq(q))
1386                 blk_mq_freeze_queue(q);
1387 pd_prealloc:
1388         if (!pd_prealloc) {
1389                 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1390                 if (!pd_prealloc) {
1391                         ret = -ENOMEM;
1392                         goto out_bypass_end;
1393                 }
1394         }
1395 
1396         spin_lock_irq(&q->queue_lock);
1397 
1398         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1399                 struct blkg_policy_data *pd;
1400 
1401                 if (blkg->pd[pol->plid])
1402                         continue;
1403 
1404                 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1405                 if (!pd)
1406                         swap(pd, pd_prealloc);
1407                 if (!pd) {
1408                         spin_unlock_irq(&q->queue_lock);
1409                         goto pd_prealloc;
1410                 }
1411 
1412                 blkg->pd[pol->plid] = pd;
1413                 pd->blkg = blkg;
1414                 pd->plid = pol->plid;
1415                 if (pol->pd_init_fn)
1416                         pol->pd_init_fn(pd);
1417         }
1418 
1419         __set_bit(pol->plid, q->blkcg_pols);
1420         ret = 0;
1421 
1422         spin_unlock_irq(&q->queue_lock);
1423 out_bypass_end:
1424         if (queue_is_mq(q))
1425                 blk_mq_unfreeze_queue(q);
1426         if (pd_prealloc)
1427                 pol->pd_free_fn(pd_prealloc);
1428         return ret;
1429 }
1430 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1431 
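/*
 * Illustrative sketch, not part of the original file: a policy usually
 * activates itself on a queue from its per-queue setup path and deactivates
 * on teardown (compare blk_throtl_init()/blk_throtl_exit() for
 * blkcg_policy_throtl).  blkcg_policy_foo is the made-up policy from the
 * earlier sketches; blkcg_deactivate_policy() is defined just below.
 */
static int foo_init_queue(struct request_queue *q)
{
	/* allocate and attach foo's per-blkg data to every existing blkg */
	return blkcg_activate_policy(q, &blkcg_policy_foo);
}

static void foo_exit_queue(struct request_queue *q)
{
	/* free foo's per-blkg data and clear the enabled bit for @q */
	blkcg_deactivate_policy(q, &blkcg_policy_foo);
}
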
1432 /**
1433  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1434  * @q: request_queue of interest
1435  * @pol: blkcg policy to deactivate
1436  *
1437  * Deactivate @pol on @q.  Follows the same synchronization rules as
1438  * blkcg_activate_policy().
1439  */
1440 void blkcg_deactivate_policy(struct request_queue *q,
1441                              const struct blkcg_policy *pol)
1442 {
1443         struct blkcg_gq *blkg;
1444 
1445         if (!blkcg_policy_enabled(q, pol))
1446                 return;
1447 
1448         if (queue_is_mq(q))
1449                 blk_mq_freeze_queue(q);
1450 
1451         spin_lock_irq(&q->queue_lock);
1452 
1453         __clear_bit(pol->plid, q->blkcg_pols);
1454 
1455         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1456                 if (blkg->pd[pol->plid]) {
1457                         if (pol->pd_offline_fn)
1458                                 pol->pd_offline_fn(blkg->pd[pol->plid]);
1459                         pol->pd_free_fn(blkg->pd[pol->plid]);
1460                         blkg->pd[pol->plid] = NULL;
1461                 }
1462         }
1463 
1464         spin_unlock_irq(&q->queue_lock);
1465 
1466         if (queue_is_mq(q))
1467                 blk_mq_unfreeze_queue(q);
1468 }
1469 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1470 
1471 /**
1472  * blkcg_policy_register - register a blkcg policy
1473  * @pol: blkcg policy to register
1474  *
1475  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1476  * successful registration.  Returns 0 on success and -errno on failure.
1477  */
1478 int blkcg_policy_register(struct blkcg_policy *pol)
1479 {
1480         struct blkcg *blkcg;
1481         int i, ret;
1482 
1483         mutex_lock(&blkcg_pol_register_mutex);
1484         mutex_lock(&blkcg_pol_mutex);
1485 
1486         /* find an empty slot */
1487         ret = -ENOSPC;
1488         for (i = 0; i < BLKCG_MAX_POLS; i++)
1489                 if (!blkcg_policy[i])
1490                         break;
1491         if (i >= BLKCG_MAX_POLS) {
1492                 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1493                 goto err_unlock;
1494         }
1495 
1496         /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1497         if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1498                 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1499                 goto err_unlock;
1500 
1501         /* register @pol */
1502         pol->plid = i;
1503         blkcg_policy[pol->plid] = pol;
1504 
1505         /* allocate and install cpd's */
1506         if (pol->cpd_alloc_fn) {
1507                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1508                         struct blkcg_policy_data *cpd;
1509 
1510                         cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1511                         if (!cpd)
1512                                 goto err_free_cpds;
1513 
1514                         blkcg->cpd[pol->plid] = cpd;
1515                         cpd->blkcg = blkcg;
1516                         cpd->plid = pol->plid;
1517                         pol->cpd_init_fn(cpd);
1518                 }
1519         }
1520 
1521         mutex_unlock(&blkcg_pol_mutex);
1522 
1523         /* everything is in place, add intf files for the new policy */
1524         if (pol->dfl_cftypes)
1525                 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1526                                                pol->dfl_cftypes));
1527         if (pol->legacy_cftypes)
1528                 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1529                                                   pol->legacy_cftypes));
1530         mutex_unlock(&blkcg_pol_register_mutex);
1531         return 0;
1532 
1533 err_free_cpds:
1534         if (pol->cpd_free_fn) {
1535                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1536                         if (blkcg->cpd[pol->plid]) {
1537                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1538                                 blkcg->cpd[pol->plid] = NULL;
1539                         }
1540                 }
1541         }
1542         blkcg_policy[pol->plid] = NULL;
1543 err_unlock:
1544         mutex_unlock(&blkcg_pol_mutex);
1545         mutex_unlock(&blkcg_pol_register_mutex);
1546         return ret;
1547 }
1548 EXPORT_SYMBOL_GPL(blkcg_policy_register);
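
A minimal registration sketch, assuming a hypothetical policy module "myblk" that only keeps per-blkcg data (the cpd callbacks exercised by the loop above). All names below are illustrative, not from this file; pd_* callbacks, if used, must be paired the same way.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-cgroup.h>

/* hypothetical per-blkcg state, embedding the core's blkcg_policy_data */
struct myblk_cgroup_data {
        struct blkcg_policy_data cpd;
        u64 weight;
};

static struct blkcg_policy_data *myblk_cpd_alloc(gfp_t gfp)
{
        struct myblk_cgroup_data *d = kzalloc(sizeof(*d), gfp);

        return d ? &d->cpd : NULL;
}

static void myblk_cpd_init(struct blkcg_policy_data *cpd)
{
        /* cpd->blkcg and cpd->plid were filled in by blkcg_policy_register() */
        container_of(cpd, struct myblk_cgroup_data, cpd)->weight = 100;
}

static void myblk_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(container_of(cpd, struct myblk_cgroup_data, cpd));
}

static struct blkcg_policy myblk_policy = {
        /* .dfl_cftypes / .legacy_cftypes would add cgroup interface files */
        .cpd_alloc_fn   = myblk_cpd_alloc,      /* must be paired with .cpd_free_fn */
        .cpd_init_fn    = myblk_cpd_init,
        .cpd_free_fn    = myblk_cpd_free,
};

static int __init myblk_init(void)
{
        /* claims a slot in blkcg_policy[] and allocates cpd's for existing blkcgs */
        return blkcg_policy_register(&myblk_policy);
}
module_init(myblk_init);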
1549 
1550 /**
1551  * blkcg_policy_unregister - unregister a blkcg policy
1552  * @pol: blkcg policy to unregister
1553  *
1554  * Undo blkcg_policy_register(@pol).  Might sleep.
1555  */
1556 void blkcg_policy_unregister(struct blkcg_policy *pol)
1557 {
1558         struct blkcg *blkcg;
1559 
1560         mutex_lock(&blkcg_pol_register_mutex);
1561 
1562         if (WARN_ON(blkcg_policy[pol->plid] != pol))
1563                 goto out_unlock;
1564 
1565         /* kill the intf files first */
1566         if (pol->dfl_cftypes)
1567                 cgroup_rm_cftypes(pol->dfl_cftypes);
1568         if (pol->legacy_cftypes)
1569                 cgroup_rm_cftypes(pol->legacy_cftypes);
1570 
1571         /* remove cpds and unregister */
1572         mutex_lock(&blkcg_pol_mutex);
1573 
1574         if (pol->cpd_free_fn) {
1575                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1576                         if (blkcg->cpd[pol->plid]) {
1577                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1578                                 blkcg->cpd[pol->plid] = NULL;
1579                         }
1580                 }
1581         }
1582         blkcg_policy[pol->plid] = NULL;
1583 
1584         mutex_unlock(&blkcg_pol_mutex);
1585 out_unlock:
1586         mutex_unlock(&blkcg_pol_register_mutex);
1587 }
1588 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
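
Continuing the hypothetical "myblk" sketch above, the module exit path simply reverses the registration:

static void __exit myblk_exit(void)
{
        /* removes the interface files, frees the cpd's and releases the slot */
        blkcg_policy_unregister(&myblk_policy);
}
module_exit(myblk_exit);
MODULE_LICENSE("GPL");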
1589 
1590 /*
1591  * Scale the accumulated delay based on how long it has been since we updated
1592  * the delay.  We only call this when we are adding delay, in case it's been a
1593  * while since we added delay, and when we are checking to see if we need to
1594  * delay a task, to account for any delays that may have occurred.
1595  */
1596 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1597 {
1598         u64 old = atomic64_read(&blkg->delay_start);
1599 
1600         /*
1601          * We only want to scale down every second.  The idea here is that we
1602          * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1603          * time window.  We only want to throttle tasks for recent delay that
1604          * has occurred, in 1 second time windows since that's the maximum
1605          * things can be throttled.  We save the current delay window in
1606          * blkg->last_delay so we know what amount is still left to be charged
1607          * to the blkg from this point onward.  blkg->last_use keeps track of
1608          * the use_delay counter.  The idea is if we're unthrottling the blkg we
1609          * are ok with whatever is happening now, and we can take away more of
1610          * the accumulated delay as we've already throttled enough that
1611          * everybody is happy with their IO latencies.
1612          */
1613         if (time_before64(old + NSEC_PER_SEC, now) &&
1614             atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1615                 u64 cur = atomic64_read(&blkg->delay_nsec);
1616                 u64 sub = min_t(u64, blkg->last_delay, now - old);
1617                 int cur_use = atomic_read(&blkg->use_delay);
1618 
1619                 /*
1620                  * We've been unthrottled, subtract a larger chunk of our
1621                  * accumulated delay.
1622                  */
1623                 if (cur_use < blkg->last_use)
1624                         sub = max_t(u64, sub, blkg->last_delay >> 1);
1625 
1626                 /*
1627                  * This shouldn't happen, but handle it anyway.  Our delay_nsec
1628                  * should only ever be growing except here where we subtract out
1629                  * min(last_delay, 1 second), but lord knows bugs happen and I'd
1630                  * rather not end up with negative numbers.
1631                  */
1632                 if (unlikely(cur < sub)) {
1633                         atomic64_set(&blkg->delay_nsec, 0);
1634                         blkg->last_delay = 0;
1635                 } else {
1636                         atomic64_sub(sub, &blkg->delay_nsec);
1637                         blkg->last_delay = cur - sub;
1638                 }
1639                 blkg->last_use = cur_use;
1640         }
1641 }
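
To make the scaling above concrete with illustrative numbers: suppose delay_nsec holds 5s of accumulated delay, last_delay is 3s and 1.2s have passed since delay_start. More than a second has elapsed, so the window is reset: sub starts as min(3s, 1.2s) = 1.2s; if use_delay has also dropped since the last scaling (the group was unthrottled), sub is raised to max(1.2s, 3s / 2) = 1.5s, leaving delay_nsec at 3.5s and last_delay at 3.5s for the next window.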
1642 
1643 /*
1644  * This is called when we want to actually walk up the hierarchy and check to
1645  * see if we need to throttle, and then actually throttle if there is some
1646  * accumulated delay.  This should only be called upon return to user space so
1647  * we're not holding some lock that would induce a priority inversion.
1648  */
1649 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1650 {
1651         u64 now = ktime_to_ns(ktime_get());
1652         u64 exp;
1653         u64 delay_nsec = 0;
1654         int tok;
1655 
1656         while (blkg->parent) {
1657                 if (atomic_read(&blkg->use_delay)) {
1658                         blkcg_scale_delay(blkg, now);
1659                         delay_nsec = max_t(u64, delay_nsec,
1660                                            atomic64_read(&blkg->delay_nsec));
1661                 }
1662                 blkg = blkg->parent;
1663         }
1664 
1665         if (!delay_nsec)
1666                 return;
1667 
1668         /*
1669          * Let's not sleep for all eternity if we've amassed a huge delay.
1670          * Swapping or metadata IO can accumulate 10's of seconds worth of
1671          * delay, and we want userspace to be able to do _something_ so cap the
1672          * delays at 250ms.  If there's 10's of seconds worth of delay then
1673          * the tasks will be delayed for at most 250ms for every syscall.
1674          */
1675         delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1676 
1677         /*
1678          * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1679          * that hasn't landed upstream yet.  Once that stuff is in place we need
1680          * to do a psi_memstall_enter/leave if memdelay is set.
1681          */
1682 
1683         exp = ktime_add_ns(now, delay_nsec);
1684         tok = io_schedule_prepare();
1685         do {
1686                 __set_current_state(TASK_KILLABLE);
1687                 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1688                         break;
1689         } while (!fatal_signal_pending(current));
1690         io_schedule_finish(tok);
1691 }
1692 
1693 /**
1694  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1695  *
1696  * This is only called if we've been marked with set_notify_resume().  Obviously
1697  * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1698  * check to see if current->throttle_queue is set and if not this doesn't do
1699  * anything.  This should only ever be called by the resume code; it's not meant
1700  * to be called by people willy-nilly as it will actually do the work to
1701  * throttle the task if it is set up for throttling.
1702  */
1703 void blkcg_maybe_throttle_current(void)
1704 {
1705         struct request_queue *q = current->throttle_queue;
1706         struct cgroup_subsys_state *css;
1707         struct blkcg *blkcg;
1708         struct blkcg_gq *blkg;
1709         bool use_memdelay = current->use_memdelay;
1710 
1711         if (!q)
1712                 return;
1713 
1714         current->throttle_queue = NULL;
1715         current->use_memdelay = false;
1716 
1717         rcu_read_lock();
1718         css = kthread_blkcg();
1719         if (css)
1720                 blkcg = css_to_blkcg(css);
1721         else
1722                 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1723 
1724         if (!blkcg)
1725                 goto out;
1726         blkg = blkg_lookup(blkcg, q);
1727         if (!blkg)
1728                 goto out;
1729         if (!blkg_tryget(blkg))
1730                 goto out;
1731         rcu_read_unlock();
1732 
1733         blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1734         blkg_put(blkg);
1735         blk_put_queue(q);
1736         return;
1737 out:
1738         rcu_read_unlock();
1739         blk_put_queue(q);
1740 }
1741 
1742 /**
1743  * blkcg_schedule_throttle - this task needs to check for throttling
1744  * @q: the request queue IO was submitted on
1745  * @use_memdelay: do we charge this to memory delay for PSI
1746  *
1747  * This is called by the IO controller when we know there's delay accumulated
1748  * for the blkg for this task.  We do not pass the blkg because there are places
1749  * we call this that may not have that information; the swapping code, for
1750  * instance, will only have a request_queue at that point.  This sets the
1751  * notify_resume for the task to check and see if it requires throttling before
1752  * returning to user space.
1753  *
1754  * We will only schedule once per syscall.  You can call this over and over
1755  * again and it will only do the check once upon return to user space, and only
1756  * throttle once.  If the task needs to be throttled again it'll need to be
1757  * re-set the next time we see the task.
1758  */
1759 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1760 {
1761         if (unlikely(current->flags & PF_KTHREAD))
1762                 return;
1763 
1764         if (!blk_get_queue(q))
1765                 return;
1766 
1767         if (current->throttle_queue)
1768                 blk_put_queue(current->throttle_queue);
1769         current->throttle_queue = q;
1770         if (use_memdelay)
1771                 current->use_memdelay = use_memdelay;
1772         set_notify_resume(current);
1773 }
1774 
1775 /**
1776  * blkcg_add_delay - add delay to this blkg
1777  * @blkg: blkg of interest
1778  * @now: the current time in nanoseconds
1779  * @delta: how many nanoseconds of delay to add
1780  *
1781  * Charge @delta to the blkg's current delay accumulation.  This is used to
1782  * throttle tasks if an IO controller thinks we need more throttling.
1783  */
1784 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1785 {
1786         blkcg_scale_delay(blkg, now);
1787         atomic64_add(delta, &blkg->delay_nsec);
1788 }
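
As a sketch of how these two hooks combine (illustrative only; the function name, the blkg argument and the measured overage are hypothetical, not from this file): an IO controller that has measured excess latency for a group could charge it and arm the resume-time check like this:

static void myblk_account_overage(struct request_queue *q,
                                  struct blkcg_gq *blkg, u64 overage_ns)
{
        u64 now = ktime_to_ns(ktime_get());

        /* fold the measured overage into the blkg's accumulated delay */
        blkcg_add_delay(blkg, now, overage_ns);

        /*
         * Ask the submitting task to check for throttling on its way back to
         * user space; this is a no-op for kernel threads.
         */
        blkcg_schedule_throttle(q, false);
}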
1789 
1790 module_param(blkcg_debug_stats, bool, 0644);
1791 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1792 
