TOMOYO Linux Cross Reference
Linux/block/blk-cgroup.c


  1 /*
  2  * Common Block IO controller cgroup interface
  3  *
  4  * Based on ideas and code from CFQ, CFS and BFQ:
  5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6  *
  7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8  *                    Paolo Valente <paolo.valente@unimore.it>
  9  *
 10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 11  *                    Nauman Rafique <nauman@google.com>
 12  *
 13  * For policy-specific per-blkcg data:
 14  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 15  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 16  */
 17 #include <linux/ioprio.h>
 18 #include <linux/kdev_t.h>
 19 #include <linux/module.h>
 20 #include <linux/sched/signal.h>
 21 #include <linux/err.h>
 22 #include <linux/blkdev.h>
 23 #include <linux/backing-dev.h>
 24 #include <linux/slab.h>
 25 #include <linux/genhd.h>
 26 #include <linux/delay.h>
 27 #include <linux/atomic.h>
 28 #include <linux/ctype.h>
 29 #include <linux/blk-cgroup.h>
 30 #include <linux/tracehook.h>
 31 #include "blk.h"
 32 
 33 #define MAX_KEY_LEN 100
 34 
 35 /*
 36  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 37  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 38  * policy [un]register operations including cgroup file additions /
 39  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 40  * allows grabbing it from cgroup callbacks.
 41  */
 42 static DEFINE_MUTEX(blkcg_pol_register_mutex);
 43 static DEFINE_MUTEX(blkcg_pol_mutex);
 44 
 45 struct blkcg blkcg_root;
 46 EXPORT_SYMBOL_GPL(blkcg_root);
 47 
 48 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 49 
 50 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 51 
 52 static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
 53 
 54 static bool blkcg_debug_stats = false;
 55 
 56 static bool blkcg_policy_enabled(struct request_queue *q,
 57                                  const struct blkcg_policy *pol)
 58 {
 59         return pol && test_bit(pol->plid, q->blkcg_pols);
 60 }
 61 
 62 /**
 63  * blkg_free - free a blkg
 64  * @blkg: blkg to free
 65  *
 66  * Free @blkg which may be partially allocated.
 67  */
 68 static void blkg_free(struct blkcg_gq *blkg)
 69 {
 70         int i;
 71 
 72         if (!blkg)
 73                 return;
 74 
 75         for (i = 0; i < BLKCG_MAX_POLS; i++)
 76                 if (blkg->pd[i])
 77                         blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 78 
 79         blkg_rwstat_exit(&blkg->stat_ios);
 80         blkg_rwstat_exit(&blkg->stat_bytes);
 81         kfree(blkg);
 82 }
 83 
 84 static void __blkg_release(struct rcu_head *rcu)
 85 {
 86         struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
 87 
 88         percpu_ref_exit(&blkg->refcnt);
 89 
 90         /* release the blkcg and parent blkg refs this blkg has been holding */
 91         css_put(&blkg->blkcg->css);
 92         if (blkg->parent)
 93                 blkg_put(blkg->parent);
 94 
 95         wb_congested_put(blkg->wb_congested);
 96 
 97         blkg_free(blkg);
 98 }
 99 
100 /*
101  * A group is RCU protected, but holding the RCU read lock does not mean
102  * that one can access all the fields of blkg and assume they are valid.
103  * For example, don't try to follow throtl_data and request queue links.
104  *
105  * Holding a reference to a blkg under RCU allows access only to values
106  * local to the group, such as group stats and group rate limits.
107  */
108 static void blkg_release(struct percpu_ref *ref)
109 {
110         struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
111 
112         call_rcu(&blkg->rcu_head, __blkg_release);
113 }
114 
115 /**
116  * blkg_alloc - allocate a blkg
117  * @blkcg: block cgroup the new blkg is associated with
118  * @q: request_queue the new blkg is associated with
119  * @gfp_mask: allocation mask to use
120  *
121  * Allocate a new blkg associating @blkcg and @q.
122  */
123 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
124                                    gfp_t gfp_mask)
125 {
126         struct blkcg_gq *blkg;
127         int i;
128 
129         /* alloc and init base part */
130         blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
131         if (!blkg)
132                 return NULL;
133 
134         if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
135             blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
136                 goto err_free;
137 
138         blkg->q = q;
139         INIT_LIST_HEAD(&blkg->q_node);
140         blkg->blkcg = blkcg;
141 
142         for (i = 0; i < BLKCG_MAX_POLS; i++) {
143                 struct blkcg_policy *pol = blkcg_policy[i];
144                 struct blkg_policy_data *pd;
145 
146                 if (!blkcg_policy_enabled(q, pol))
147                         continue;
148 
149                 /* alloc per-policy data and attach it to blkg */
150                 pd = pol->pd_alloc_fn(gfp_mask, q->node);
151                 if (!pd)
152                         goto err_free;
153 
154                 blkg->pd[i] = pd;
155                 pd->blkg = blkg;
156                 pd->plid = i;
157         }
158 
159         return blkg;
160 
161 err_free:
162         blkg_free(blkg);
163         return NULL;
164 }
165 
166 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
167                                       struct request_queue *q, bool update_hint)
168 {
169         struct blkcg_gq *blkg;
170 
171         /*
172          * Hint didn't match.  Look up from the radix tree.  Note that the
173          * hint can only be updated under queue_lock as otherwise @blkg
174          * could have already been removed from blkg_tree.  The caller is
175          * responsible for grabbing queue_lock if @update_hint.
176          */
177         blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
178         if (blkg && blkg->q == q) {
179                 if (update_hint) {
180                         lockdep_assert_held(&q->queue_lock);
181                         rcu_assign_pointer(blkcg->blkg_hint, blkg);
182                 }
183                 return blkg;
184         }
185 
186         return NULL;
187 }
188 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
189 
190 /*
191  * If @new_blkg is %NULL, this function tries to allocate a new one as
192  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
193  */
194 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
195                                     struct request_queue *q,
196                                     struct blkcg_gq *new_blkg)
197 {
198         struct blkcg_gq *blkg;
199         struct bdi_writeback_congested *wb_congested;
200         int i, ret;
201 
202         WARN_ON_ONCE(!rcu_read_lock_held());
203         lockdep_assert_held(&q->queue_lock);
204 
205         /* request_queue is dying, do not create/recreate a blkg */
206         if (blk_queue_dying(q)) {
207                 ret = -ENODEV;
208                 goto err_free_blkg;
209         }
210 
211         /* blkg holds a reference to blkcg */
212         if (!css_tryget_online(&blkcg->css)) {
213                 ret = -ENODEV;
214                 goto err_free_blkg;
215         }
216 
217         wb_congested = wb_congested_get_create(q->backing_dev_info,
218                                                blkcg->css.id,
219                                                GFP_NOWAIT | __GFP_NOWARN);
220         if (!wb_congested) {
221                 ret = -ENOMEM;
222                 goto err_put_css;
223         }
224 
225         /* allocate */
226         if (!new_blkg) {
227                 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
228                 if (unlikely(!new_blkg)) {
229                         ret = -ENOMEM;
230                         goto err_put_congested;
231                 }
232         }
233         blkg = new_blkg;
234         blkg->wb_congested = wb_congested;
235 
236         /* link parent */
237         if (blkcg_parent(blkcg)) {
238                 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
239                 if (WARN_ON_ONCE(!blkg->parent)) {
240                         ret = -ENODEV;
241                         goto err_put_congested;
242                 }
243                 blkg_get(blkg->parent);
244         }
245 
246         ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
247                               GFP_NOWAIT | __GFP_NOWARN);
248         if (ret)
249                 goto err_cancel_ref;
250 
251         /* invoke per-policy init */
252         for (i = 0; i < BLKCG_MAX_POLS; i++) {
253                 struct blkcg_policy *pol = blkcg_policy[i];
254 
255                 if (blkg->pd[i] && pol->pd_init_fn)
256                         pol->pd_init_fn(blkg->pd[i]);
257         }
258 
259         /* insert */
260         spin_lock(&blkcg->lock);
261         ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
262         if (likely(!ret)) {
263                 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
264                 list_add(&blkg->q_node, &q->blkg_list);
265 
266                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
267                         struct blkcg_policy *pol = blkcg_policy[i];
268 
269                         if (blkg->pd[i] && pol->pd_online_fn)
270                                 pol->pd_online_fn(blkg->pd[i]);
271                 }
272         }
273         blkg->online = true;
274         spin_unlock(&blkcg->lock);
275 
276         if (!ret)
277                 return blkg;
278 
279         /* @blkg failed to be fully initialized, use the usual release path */
280         blkg_put(blkg);
281         return ERR_PTR(ret);
282 
283 err_cancel_ref:
284         percpu_ref_exit(&blkg->refcnt);
285 err_put_congested:
286         wb_congested_put(wb_congested);
287 err_put_css:
288         css_put(&blkcg->css);
289 err_free_blkg:
290         blkg_free(new_blkg);
291         return ERR_PTR(ret);
292 }
293 
294 /**
295  * __blkg_lookup_create - lookup blkg, try to create one if not there
296  * @blkcg: blkcg of interest
297  * @q: request_queue of interest
298  *
299  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
300  * create one.  blkg creation is performed recursively from blkcg_root such
301  * that all non-root blkg's have access to the parent blkg.  This function
302  * should be called under RCU read lock and @q->queue_lock.
303  *
304  * Returns the blkg or the closest blkg if blkg_create() fails as it walks
305  * down from root.
306  */
307 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
308                                       struct request_queue *q)
309 {
310         struct blkcg_gq *blkg;
311 
312         WARN_ON_ONCE(!rcu_read_lock_held());
313         lockdep_assert_held(&q->queue_lock);
314 
315         blkg = __blkg_lookup(blkcg, q, true);
316         if (blkg)
317                 return blkg;
318 
319         /*
320          * Create blkgs walking down from blkcg_root to @blkcg, so that all
321          * non-root blkgs have access to their parents.  Returns the closest
322          * blkg to the intended blkg should blkg_create() fail.
323          */
324         while (true) {
325                 struct blkcg *pos = blkcg;
326                 struct blkcg *parent = blkcg_parent(blkcg);
327                 struct blkcg_gq *ret_blkg = q->root_blkg;
328 
329                 while (parent) {
330                         blkg = __blkg_lookup(parent, q, false);
331                         if (blkg) {
332                                 /* remember closest blkg */
333                                 ret_blkg = blkg;
334                                 break;
335                         }
336                         pos = parent;
337                         parent = blkcg_parent(parent);
338                 }
339 
340                 blkg = blkg_create(pos, q, NULL);
341                 if (IS_ERR(blkg))
342                         return ret_blkg;
343                 if (pos == blkcg)
344                         return blkg;
345         }
346 }
347 
348 /**
349  * blkg_lookup_create - find or create a blkg
350  * @blkcg: target block cgroup
351  * @q: target request_queue
352  *
353  * This looks up or creates the blkg representing the unique pair
354  * of the blkcg and the request_queue.
355  */
356 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
357                                     struct request_queue *q)
358 {
359         struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
360 
361         if (unlikely(!blkg)) {
362                 unsigned long flags;
363 
364                 spin_lock_irqsave(&q->queue_lock, flags);
365                 blkg = __blkg_lookup_create(blkcg, q);
366                 spin_unlock_irqrestore(&q->queue_lock, flags);
367         }
368 
369         return blkg;
370 }
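
/*
 * Illustrative sketch, not part of the original file: one way a caller that
 * knows its request_queue could resolve the current task's blkg with
 * blkg_lookup_create().  The helper name and the way the blkcg is derived
 * from "current" are assumptions made only for this example.
 */
static bool example_blkg_is_online(struct request_queue *q)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool online = false;

        rcu_read_lock();
        blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
        /* creates the blkg (and any missing ancestors) on first use */
        blkg = blkg_lookup_create(blkcg, q);
        if (blkg)
                online = blkg->online;  /* group-local field, OK under RCU */
        rcu_read_unlock();

        return online;
}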
371 
372 static void blkg_destroy(struct blkcg_gq *blkg)
373 {
374         struct blkcg *blkcg = blkg->blkcg;
375         struct blkcg_gq *parent = blkg->parent;
376         int i;
377 
378         lockdep_assert_held(&blkg->q->queue_lock);
379         lockdep_assert_held(&blkcg->lock);
380 
381         /* Something is wrong if we are trying to remove the same group twice */
382         WARN_ON_ONCE(list_empty(&blkg->q_node));
383         WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
384 
385         for (i = 0; i < BLKCG_MAX_POLS; i++) {
386                 struct blkcg_policy *pol = blkcg_policy[i];
387 
388                 if (blkg->pd[i] && pol->pd_offline_fn)
389                         pol->pd_offline_fn(blkg->pd[i]);
390         }
391 
392         if (parent) {
393                 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
394                 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
395         }
396 
397         blkg->online = false;
398 
399         radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
400         list_del_init(&blkg->q_node);
401         hlist_del_init_rcu(&blkg->blkcg_node);
402 
403         /*
404          * Both setting lookup hint to and clearing it from @blkg are done
405          * under queue_lock.  If it's not pointing to @blkg now, it never
406          * will.  Hint assignment itself can race safely.
407          */
408         if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
409                 rcu_assign_pointer(blkcg->blkg_hint, NULL);
410 
411         /*
412          * Put the reference taken at the time of creation so that when all
413          * queues are gone, group can be destroyed.
414          */
415         percpu_ref_kill(&blkg->refcnt);
416 }
417 
418 /**
419  * blkg_destroy_all - destroy all blkgs associated with a request_queue
420  * @q: request_queue of interest
421  *
422  * Destroy all blkgs associated with @q.
423  */
424 static void blkg_destroy_all(struct request_queue *q)
425 {
426         struct blkcg_gq *blkg, *n;
427 
428         spin_lock_irq(&q->queue_lock);
429         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
430                 struct blkcg *blkcg = blkg->blkcg;
431 
432                 spin_lock(&blkcg->lock);
433                 blkg_destroy(blkg);
434                 spin_unlock(&blkcg->lock);
435         }
436 
437         q->root_blkg = NULL;
438         spin_unlock_irq(&q->queue_lock);
439 }
440 
441 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
442                              struct cftype *cftype, u64 val)
443 {
444         struct blkcg *blkcg = css_to_blkcg(css);
445         struct blkcg_gq *blkg;
446         int i;
447 
448         mutex_lock(&blkcg_pol_mutex);
449         spin_lock_irq(&blkcg->lock);
450 
451         /*
452          * Note that stat reset is racy - it doesn't synchronize against
453          * stat updates.  This is a debug feature which shouldn't exist
454          * anyway.  If you get hit by a race, retry.
455          */
456         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
457                 blkg_rwstat_reset(&blkg->stat_bytes);
458                 blkg_rwstat_reset(&blkg->stat_ios);
459 
460                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
461                         struct blkcg_policy *pol = blkcg_policy[i];
462 
463                         if (blkg->pd[i] && pol->pd_reset_stats_fn)
464                                 pol->pd_reset_stats_fn(blkg->pd[i]);
465                 }
466         }
467 
468         spin_unlock_irq(&blkcg->lock);
469         mutex_unlock(&blkcg_pol_mutex);
470         return 0;
471 }
472 
473 const char *blkg_dev_name(struct blkcg_gq *blkg)
474 {
475         /* some drivers (floppy) instantiate a queue w/o disk registered */
476         if (blkg->q->backing_dev_info->dev)
477                 return dev_name(blkg->q->backing_dev_info->dev);
478         return NULL;
479 }
480 
481 /**
482  * blkcg_print_blkgs - helper for printing per-blkg data
483  * @sf: seq_file to print to
484  * @blkcg: blkcg of interest
485  * @prfill: fill function to print out a blkg
486  * @pol: policy in question
487  * @data: data to be passed to @prfill
488  * @show_total: to print out sum of prfill return values or not
489  *
490  * This function invokes @prfill on each blkg of @blkcg if pd for the
491  * policy specified by @pol exists.  @prfill is invoked with @sf, the
492  * policy data and @data and the matching queue lock held.  If @show_total
493  * is %true, the sum of the return values from @prfill is printed with
494  * "Total" label at the end.
495  *
496  * This is to be used to construct print functions for the
497  * cftype->read_seq_string method.
498  */
499 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
500                        u64 (*prfill)(struct seq_file *,
501                                      struct blkg_policy_data *, int),
502                        const struct blkcg_policy *pol, int data,
503                        bool show_total)
504 {
505         struct blkcg_gq *blkg;
506         u64 total = 0;
507 
508         rcu_read_lock();
509         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
510                 spin_lock_irq(&blkg->q->queue_lock);
511                 if (blkcg_policy_enabled(blkg->q, pol))
512                         total += prfill(sf, blkg->pd[pol->plid], data);
513                 spin_unlock_irq(&blkg->q->queue_lock);
514         }
515         rcu_read_unlock();
516 
517         if (show_total)
518                 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
519 }
520 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
521 
522 /**
523  * __blkg_prfill_u64 - prfill helper for a single u64 value
524  * @sf: seq_file to print to
525  * @pd: policy private data of interest
526  * @v: value to print
527  *
528  * Print @v to @sf for the device associated with @pd.
529  */
530 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
531 {
532         const char *dname = blkg_dev_name(pd->blkg);
533 
534         if (!dname)
535                 return 0;
536 
537         seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
538         return v;
539 }
540 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
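
/*
 * Illustrative sketch, not part of the original file: a hypothetical policy
 * ("example_policy", with a pd layout invented for this example and defined
 * in the registration sketch further down) could build a cgroup-file
 * seq_show callback on top of blkcg_print_blkgs() and __blkg_prfill_u64().
 */
struct example_pd {
        struct blkg_policy_data pd;     /* must come first; see the later sketches */
        u64 limit;
        struct blkg_stat serviced;
};

static struct blkcg_policy example_policy;

static u64 example_prfill_limit(struct seq_file *sf,
                                struct blkg_policy_data *pd, int data)
{
        struct example_pd *epd = container_of(pd, struct example_pd, pd);

        /* prints "<devname> <value>" and returns the value for the Total line */
        return __blkg_prfill_u64(sf, pd, epd->limit);
}

static int example_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill_limit,
                          &example_policy, 0, true);
        return 0;
}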
541 
542 /**
543  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
544  * @sf: seq_file to print to
545  * @pd: policy private data of interest
546  * @rwstat: rwstat to print
547  *
548  * Print @rwstat to @sf for the device associated with @pd.
549  */
550 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
551                          const struct blkg_rwstat *rwstat)
552 {
553         static const char *rwstr[] = {
554                 [BLKG_RWSTAT_READ]      = "Read",
555                 [BLKG_RWSTAT_WRITE]     = "Write",
556                 [BLKG_RWSTAT_SYNC]      = "Sync",
557                 [BLKG_RWSTAT_ASYNC]     = "Async",
558                 [BLKG_RWSTAT_DISCARD]   = "Discard",
559         };
560         const char *dname = blkg_dev_name(pd->blkg);
561         u64 v;
562         int i;
563 
564         if (!dname)
565                 return 0;
566 
567         for (i = 0; i < BLKG_RWSTAT_NR; i++)
568                 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
569                            (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
570 
571         v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
572                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
573                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
574         seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
575         return v;
576 }
577 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
578 
579 /**
580  * blkg_prfill_stat - prfill callback for blkg_stat
581  * @sf: seq_file to print to
582  * @pd: policy private data of interest
583  * @off: offset to the blkg_stat in @pd
584  *
585  * prfill callback for printing a blkg_stat.
586  */
587 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
588 {
589         return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
590 }
591 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
592 
593 /**
594  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
595  * @sf: seq_file to print to
596  * @pd: policy private data of interest
597  * @off: offset to the blkg_rwstat in @pd
598  *
599  * prfill callback for printing a blkg_rwstat.
600  */
601 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
602                        int off)
603 {
604         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
605 
606         return __blkg_prfill_rwstat(sf, pd, &rwstat);
607 }
608 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
609 
610 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
611                                     struct blkg_policy_data *pd, int off)
612 {
613         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
614 
615         return __blkg_prfill_rwstat(sf, pd, &rwstat);
616 }
617 
618 /**
619  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
620  * @sf: seq_file to print to
621  * @v: unused
622  *
623  * To be used as cftype->seq_show to print blkg->stat_bytes.
624  * cftype->private must be set to the blkcg_policy.
625  */
626 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
627 {
628         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
629                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
630                           offsetof(struct blkcg_gq, stat_bytes), true);
631         return 0;
632 }
633 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
634 
635 /**
636  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
637  * @sf: seq_file to print to
638  * @v: unused
639  *
640  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
641  * must be set to the blkcg_policy.
642  */
643 int blkg_print_stat_ios(struct seq_file *sf, void *v)
644 {
645         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
646                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
647                           offsetof(struct blkcg_gq, stat_ios), true);
648         return 0;
649 }
650 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
651 
652 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
653                                               struct blkg_policy_data *pd,
654                                               int off)
655 {
656         struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
657                                                               NULL, off);
658         return __blkg_prfill_rwstat(sf, pd, &rwstat);
659 }
660 
661 /**
662  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
663  * @sf: seq_file to print to
664  * @v: unused
665  */
666 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
667 {
668         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
669                           blkg_prfill_rwstat_field_recursive,
670                           (void *)seq_cft(sf)->private,
671                           offsetof(struct blkcg_gq, stat_bytes), true);
672         return 0;
673 }
674 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
675 
676 /**
677  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
678  * @sf: seq_file to print to
679  * @v: unused
680  */
681 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
682 {
683         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
684                           blkg_prfill_rwstat_field_recursive,
685                           (void *)seq_cft(sf)->private,
686                           offsetof(struct blkcg_gq, stat_ios), true);
687         return 0;
688 }
689 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
690 
691 /**
692  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
693  * @blkg: blkg of interest
694  * @pol: blkcg_policy which contains the blkg_stat
695  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
696  *
697  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
698  * online descendants and their aux counts.  The caller must be holding the
699  * queue lock for online tests.
700  *
701  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
702  * at @off bytes into @blkg's blkg_policy_data of the policy.
703  */
704 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
705                             struct blkcg_policy *pol, int off)
706 {
707         struct blkcg_gq *pos_blkg;
708         struct cgroup_subsys_state *pos_css;
709         u64 sum = 0;
710 
711         lockdep_assert_held(&blkg->q->queue_lock);
712 
713         rcu_read_lock();
714         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
715                 struct blkg_stat *stat;
716 
717                 if (!pos_blkg->online)
718                         continue;
719 
720                 if (pol)
721                         stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
722                 else
723                         stat = (void *)blkg + off;
724 
725                 sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
726         }
727         rcu_read_unlock();
728 
729         return sum;
730 }
731 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
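
/*
 * Illustrative sketch, not part of the original file: using the hypothetical
 * example_pd/example_policy from the sketch above (the "serviced" field and
 * its placement are assumptions), a policy could fold the whole subtree into
 * one number like this.
 */
static u64 example_total_serviced(struct blkcg_gq *blkg)
{
        /* caller must hold blkg->q->queue_lock for the online checks */
        return blkg_stat_recursive_sum(blkg, &example_policy,
                                       offsetof(struct example_pd, serviced));
}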
732 
733 /**
734  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
735  * @blkg: blkg of interest
736  * @pol: blkcg_policy which contains the blkg_rwstat
737  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
738  *
739  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
740  * online descendants and their aux counts.  The caller must be holding the
741  * queue lock for online tests.
742  *
743  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
744  * is at @off bytes into @blkg's blkg_policy_data of the policy.
745  */
746 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
747                                              struct blkcg_policy *pol, int off)
748 {
749         struct blkcg_gq *pos_blkg;
750         struct cgroup_subsys_state *pos_css;
751         struct blkg_rwstat sum = { };
752         int i;
753 
754         lockdep_assert_held(&blkg->q->queue_lock);
755 
756         rcu_read_lock();
757         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
758                 struct blkg_rwstat *rwstat;
759 
760                 if (!pos_blkg->online)
761                         continue;
762 
763                 if (pol)
764                         rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
765                 else
766                         rwstat = (void *)pos_blkg + off;
767 
768                 for (i = 0; i < BLKG_RWSTAT_NR; i++)
769                         atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
770                                 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
771                                 &sum.aux_cnt[i]);
772         }
773         rcu_read_unlock();
774 
775         return sum;
776 }
777 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
778 
779 /* Performs queue bypass and policy enabled checks then looks up blkg. */
780 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
781                                           const struct blkcg_policy *pol,
782                                           struct request_queue *q)
783 {
784         WARN_ON_ONCE(!rcu_read_lock_held());
785         lockdep_assert_held(&q->queue_lock);
786 
787         if (!blkcg_policy_enabled(q, pol))
788                 return ERR_PTR(-EOPNOTSUPP);
789         return __blkg_lookup(blkcg, q, true /* update_hint */);
790 }
791 
792 /**
793  * blkg_conf_prep - parse and prepare for per-blkg config update
794  * @blkcg: target block cgroup
795  * @pol: target policy
796  * @input: input string
797  * @ctx: blkg_conf_ctx to be filled
798  *
799  * Parse per-blkg config update from @input and initialize @ctx with the
800  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
801  * part of @input following MAJ:MIN.  This function returns with RCU read
802  * lock and queue lock held and must be paired with blkg_conf_finish().
803  */
804 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
805                    char *input, struct blkg_conf_ctx *ctx)
806         __acquires(rcu) __acquires(&disk->queue->queue_lock)
807 {
808         struct gendisk *disk;
809         struct request_queue *q;
810         struct blkcg_gq *blkg;
811         unsigned int major, minor;
812         int key_len, part, ret;
813         char *body;
814 
815         if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
816                 return -EINVAL;
817 
818         body = input + key_len;
819         if (!isspace(*body))
820                 return -EINVAL;
821         body = skip_spaces(body);
822 
823         disk = get_gendisk(MKDEV(major, minor), &part);
824         if (!disk)
825                 return -ENODEV;
826         if (part) {
827                 ret = -ENODEV;
828                 goto fail;
829         }
830 
831         q = disk->queue;
832 
833         rcu_read_lock();
834         spin_lock_irq(&q->queue_lock);
835 
836         blkg = blkg_lookup_check(blkcg, pol, q);
837         if (IS_ERR(blkg)) {
838                 ret = PTR_ERR(blkg);
839                 goto fail_unlock;
840         }
841 
842         if (blkg)
843                 goto success;
844 
845         /*
846          * Create blkgs walking down from blkcg_root to @blkcg, so that all
847          * non-root blkgs have access to their parents.
848          */
849         while (true) {
850                 struct blkcg *pos = blkcg;
851                 struct blkcg *parent;
852                 struct blkcg_gq *new_blkg;
853 
854                 parent = blkcg_parent(blkcg);
855                 while (parent && !__blkg_lookup(parent, q, false)) {
856                         pos = parent;
857                         parent = blkcg_parent(parent);
858                 }
859 
860                 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
861                 spin_unlock_irq(&q->queue_lock);
862                 rcu_read_unlock();
863 
864                 new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
865                 if (unlikely(!new_blkg)) {
866                         ret = -ENOMEM;
867                         goto fail;
868                 }
869 
870                 rcu_read_lock();
871                 spin_lock_irq(&q->queue_lock);
872 
873                 blkg = blkg_lookup_check(pos, pol, q);
874                 if (IS_ERR(blkg)) {
875                         ret = PTR_ERR(blkg);
876                         goto fail_unlock;
877                 }
878 
879                 if (blkg) {
880                         blkg_free(new_blkg);
881                 } else {
882                         blkg = blkg_create(pos, q, new_blkg);
883                         if (unlikely(IS_ERR(blkg))) {
884                                 ret = PTR_ERR(blkg);
885                                 goto fail_unlock;
886                         }
887                 }
888 
889                 if (pos == blkcg)
890                         goto success;
891         }
892 success:
893         ctx->disk = disk;
894         ctx->blkg = blkg;
895         ctx->body = body;
896         return 0;
897 
898 fail_unlock:
899         spin_unlock_irq(&q->queue_lock);
900         rcu_read_unlock();
901 fail:
902         put_disk_and_module(disk);
903         /*
904          * If queue was bypassing, we should retry.  Do so after a
905          * short msleep().  It isn't strictly necessary but queue
906          * can be bypassing for some time and it's always nice to
907          * avoid busy looping.
908          */
909         if (ret == -EBUSY) {
910                 msleep(10);
911                 ret = restart_syscall();
912         }
913         return ret;
914 }
915 
916 /**
917  * blkg_conf_finish - finish up per-blkg config update
918  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
919  *
920  * Finish up after per-blkg config update.  This function must be paired
921  * with blkg_conf_prep().
922  */
923 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
924         __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
925 {
926         spin_unlock_irq(&ctx->disk->queue->queue_lock);
927         rcu_read_unlock();
928         put_disk_and_module(ctx->disk);
929 }
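
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * "MAJ:MIN <limit>" cgroup file for example_policy (names, pd layout and
 * semantics are assumptions carried over from the sketches above) would pair
 * blkg_conf_prep() with blkg_conf_finish() like this.
 */
static ssize_t example_set_limit(struct kernfs_open_file *of, char *buf,
                                 size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        u64 limit;
        int ret;

        /* parses MAJ:MIN and returns with RCU read lock and queue_lock held */
        ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
        if (ret)
                return ret;

        ret = kstrtou64(ctx.body, 10, &limit);
        if (!ret) {
                struct example_pd *epd = container_of(
                        blkg_to_pd(ctx.blkg, &example_policy),
                        struct example_pd, pd);

                epd->limit = limit;
        }

        /* drops the locks taken by blkg_conf_prep() */
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}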
930 
931 static int blkcg_print_stat(struct seq_file *sf, void *v)
932 {
933         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
934         struct blkcg_gq *blkg;
935 
936         rcu_read_lock();
937 
938         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
939                 const char *dname;
940                 char *buf;
941                 struct blkg_rwstat rwstat;
942                 u64 rbytes, wbytes, rios, wios, dbytes, dios;
943                 size_t size = seq_get_buf(sf, &buf), off = 0;
944                 int i;
945                 bool has_stats = false;
946 
947                 dname = blkg_dev_name(blkg);
948                 if (!dname)
949                         continue;
950 
951                 /*
952                  * Hooray string manipulation, count is the size written NOT
953                  * INCLUDING THE \0, so size is now count+1 less than what we
954                  * had before, but we want to start writing the next bit from
955                  * the \0 so we only add count to buf.
956                  */
957                 off += scnprintf(buf+off, size-off, "%s ", dname);
958 
959                 spin_lock_irq(&blkg->q->queue_lock);
960 
961                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
962                                         offsetof(struct blkcg_gq, stat_bytes));
963                 rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
964                 wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
965                 dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
966 
967                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
968                                         offsetof(struct blkcg_gq, stat_ios));
969                 rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
970                 wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
971                 dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
972 
973                 spin_unlock_irq(&blkg->q->queue_lock);
974 
975                 if (rbytes || wbytes || rios || wios) {
976                         has_stats = true;
977                         off += scnprintf(buf+off, size-off,
978                                          "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
979                                          rbytes, wbytes, rios, wios,
980                                          dbytes, dios);
981                 }
982 
983                 if (!blkcg_debug_stats)
984                         goto next;
985 
986                 if (atomic_read(&blkg->use_delay)) {
987                         has_stats = true;
988                         off += scnprintf(buf+off, size-off,
989                                          " use_delay=%d delay_nsec=%llu",
990                                          atomic_read(&blkg->use_delay),
991                                         (unsigned long long)atomic64_read(&blkg->delay_nsec));
992                 }
993 
994                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
995                         struct blkcg_policy *pol = blkcg_policy[i];
996                         size_t written;
997 
998                         if (!blkg->pd[i] || !pol->pd_stat_fn)
999                                 continue;
1000 
1001                         written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
1002                         if (written)
1003                                 has_stats = true;
1004                         off += written;
1005                 }
1006 next:
1007                 if (has_stats) {
1008                         off += scnprintf(buf+off, size-off, "\n");
1009                         seq_commit(sf, off);
1010                 }
1011         }
1012 
1013         rcu_read_unlock();
1014         return 0;
1015 }
1016 
1017 static struct cftype blkcg_files[] = {
1018         {
1019                 .name = "stat",
1020                 .flags = CFTYPE_NOT_ON_ROOT,
1021                 .seq_show = blkcg_print_stat,
1022         },
1023         { }     /* terminate */
1024 };
1025 
1026 static struct cftype blkcg_legacy_files[] = {
1027         {
1028                 .name = "reset_stats",
1029                 .write_u64 = blkcg_reset_stats,
1030         },
1031         { }     /* terminate */
1032 };
1033 
1034 /*
1035  * blkcg destruction is a three-stage process.
1036  *
1037  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1038  *    which offlines writeback.  Here we tie the next stage of blkg destruction
1039  *    to the completion of writeback associated with the blkcg.  This lets us
1040  *    avoid punting potentially large amounts of outstanding writeback to root
1041  *    while maintaining any ongoing policies.  The next stage is triggered when
1042  *    the nr_cgwbs count goes to zero.
1043  *
1044  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1045  *    and handles the destruction of blkgs.  Here the css reference held by
1046  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1047  *    This work may occur in cgwb_release_workfn() on the cgwb_release
1048  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1049  *    punted to the root_blkg.
1050  *
1051  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1052  *    This finally frees the blkcg.
1053  */
1054 
1055 /**
1056  * blkcg_css_offline - cgroup css_offline callback
1057  * @css: css of interest
1058  *
1059  * This function is called when @css is about to go away.  Here the cgwbs are
1060  * offlined first and only once writeback associated with the blkcg has
1061  * finished do we start step 2 (see above).
1062  */
1063 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1064 {
1065         struct blkcg *blkcg = css_to_blkcg(css);
1066 
1067         /* this prevents anyone from attaching or migrating to this blkcg */
1068         wb_blkcg_offline(blkcg);
1069 
1070         /* put the base cgwb reference allowing step 2 to be triggered */
1071         blkcg_cgwb_put(blkcg);
1072 }
1073 
1074 /**
1075  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1076  * @blkcg: blkcg of interest
1077  *
1078  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1079  * is nested inside q lock, this function performs reverse double lock dancing.
1080  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1081  * blkcg_css_free to eventually be called.
1082  *
1083  * This is the blkcg counterpart of ioc_release_fn().
1084  */
1085 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1086 {
1087         spin_lock_irq(&blkcg->lock);
1088 
1089         while (!hlist_empty(&blkcg->blkg_list)) {
1090                 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1091                                                 struct blkcg_gq, blkcg_node);
1092                 struct request_queue *q = blkg->q;
1093 
1094                 if (spin_trylock(&q->queue_lock)) {
1095                         blkg_destroy(blkg);
1096                         spin_unlock(&q->queue_lock);
1097                 } else {
1098                         spin_unlock_irq(&blkcg->lock);
1099                         cpu_relax();
1100                         spin_lock_irq(&blkcg->lock);
1101                 }
1102         }
1103 
1104         spin_unlock_irq(&blkcg->lock);
1105 }
1106 
1107 static void blkcg_css_free(struct cgroup_subsys_state *css)
1108 {
1109         struct blkcg *blkcg = css_to_blkcg(css);
1110         int i;
1111 
1112         mutex_lock(&blkcg_pol_mutex);
1113 
1114         list_del(&blkcg->all_blkcgs_node);
1115 
1116         for (i = 0; i < BLKCG_MAX_POLS; i++)
1117                 if (blkcg->cpd[i])
1118                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1119 
1120         mutex_unlock(&blkcg_pol_mutex);
1121 
1122         kfree(blkcg);
1123 }
1124 
1125 static struct cgroup_subsys_state *
1126 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1127 {
1128         struct blkcg *blkcg;
1129         struct cgroup_subsys_state *ret;
1130         int i;
1131 
1132         mutex_lock(&blkcg_pol_mutex);
1133 
1134         if (!parent_css) {
1135                 blkcg = &blkcg_root;
1136         } else {
1137                 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1138                 if (!blkcg) {
1139                         ret = ERR_PTR(-ENOMEM);
1140                         goto unlock;
1141                 }
1142         }
1143 
1144         for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1145                 struct blkcg_policy *pol = blkcg_policy[i];
1146                 struct blkcg_policy_data *cpd;
1147 
1148                 /*
1149                  * If the policy hasn't been attached yet, wait for it
1150                  * to be attached before doing anything else. Otherwise,
1151                  * check if the policy requires any specific per-cgroup
1152                  * data: if it does, allocate and initialize it.
1153                  */
1154                 if (!pol || !pol->cpd_alloc_fn)
1155                         continue;
1156 
1157                 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1158                 if (!cpd) {
1159                         ret = ERR_PTR(-ENOMEM);
1160                         goto free_pd_blkcg;
1161                 }
1162                 blkcg->cpd[i] = cpd;
1163                 cpd->blkcg = blkcg;
1164                 cpd->plid = i;
1165                 if (pol->cpd_init_fn)
1166                         pol->cpd_init_fn(cpd);
1167         }
1168 
1169         spin_lock_init(&blkcg->lock);
1170         INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1171         INIT_HLIST_HEAD(&blkcg->blkg_list);
1172 #ifdef CONFIG_CGROUP_WRITEBACK
1173         INIT_LIST_HEAD(&blkcg->cgwb_list);
1174         refcount_set(&blkcg->cgwb_refcnt, 1);
1175 #endif
1176         list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1177 
1178         mutex_unlock(&blkcg_pol_mutex);
1179         return &blkcg->css;
1180 
1181 free_pd_blkcg:
1182         for (i--; i >= 0; i--)
1183                 if (blkcg->cpd[i])
1184                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1185 
1186         if (blkcg != &blkcg_root)
1187                 kfree(blkcg);
1188 unlock:
1189         mutex_unlock(&blkcg_pol_mutex);
1190         return ret;
1191 }
1192 
1193 /**
1194  * blkcg_init_queue - initialize blkcg part of request queue
1195  * @q: request_queue to initialize
1196  *
1197  * Called from blk_alloc_queue_node(). Responsible for initializing the
1198  * blkcg part of the new request_queue @q.
1199  *
1200  * RETURNS:
1201  * 0 on success, -errno on failure.
1202  */
1203 int blkcg_init_queue(struct request_queue *q)
1204 {
1205         struct blkcg_gq *new_blkg, *blkg;
1206         bool preloaded;
1207         int ret;
1208 
1209         new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1210         if (!new_blkg)
1211                 return -ENOMEM;
1212 
1213         preloaded = !radix_tree_preload(GFP_KERNEL);
1214 
1215         /* Make sure the root blkg exists. */
1216         rcu_read_lock();
1217         spin_lock_irq(&q->queue_lock);
1218         blkg = blkg_create(&blkcg_root, q, new_blkg);
1219         if (IS_ERR(blkg))
1220                 goto err_unlock;
1221         q->root_blkg = blkg;
1222         spin_unlock_irq(&q->queue_lock);
1223         rcu_read_unlock();
1224 
1225         if (preloaded)
1226                 radix_tree_preload_end();
1227 
1228         ret = blk_iolatency_init(q);
1229         if (ret)
1230                 goto err_destroy_all;
1231 
1232         ret = blk_throtl_init(q);
1233         if (ret)
1234                 goto err_destroy_all;
1235         return 0;
1236 
1237 err_destroy_all:
1238         blkg_destroy_all(q);
1239         return ret;
1240 err_unlock:
1241         spin_unlock_irq(&q->queue_lock);
1242         rcu_read_unlock();
1243         if (preloaded)
1244                 radix_tree_preload_end();
1245         return PTR_ERR(blkg);
1246 }
1247 
1248 /**
1249  * blkcg_drain_queue - drain blkcg part of request_queue
1250  * @q: request_queue to drain
1251  *
1252  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1253  */
1254 void blkcg_drain_queue(struct request_queue *q)
1255 {
1256         lockdep_assert_held(&q->queue_lock);
1257 
1258         /*
1259          * @q could be exiting and already have destroyed all blkgs as
1260          * indicated by NULL root_blkg.  If so, don't confuse policies.
1261          */
1262         if (!q->root_blkg)
1263                 return;
1264 
1265         blk_throtl_drain(q);
1266 }
1267 
1268 /**
1269  * blkcg_exit_queue - exit and release blkcg part of request_queue
1270  * @q: request_queue being released
1271  *
1272  * Called from blk_release_queue().  Responsible for exiting blkcg part.
1273  */
1274 void blkcg_exit_queue(struct request_queue *q)
1275 {
1276         blkg_destroy_all(q);
1277         blk_throtl_exit(q);
1278 }
1279 
1280 /*
1281  * We cannot support shared io contexts, as we have no means to support
1282  * two tasks with the same ioc in two different groups without major rework
1283  * of the main cic data structures.  For now we allow a task to change
1284  * its cgroup only if it's the only owner of its ioc.
1285  */
1286 static int blkcg_can_attach(struct cgroup_taskset *tset)
1287 {
1288         struct task_struct *task;
1289         struct cgroup_subsys_state *dst_css;
1290         struct io_context *ioc;
1291         int ret = 0;
1292 
1293         /* task_lock() is needed to avoid races with exit_io_context() */
1294         cgroup_taskset_for_each(task, dst_css, tset) {
1295                 task_lock(task);
1296                 ioc = task->io_context;
1297                 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1298                         ret = -EINVAL;
1299                 task_unlock(task);
1300                 if (ret)
1301                         break;
1302         }
1303         return ret;
1304 }
1305 
1306 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1307 {
1308         int i;
1309 
1310         mutex_lock(&blkcg_pol_mutex);
1311 
1312         for (i = 0; i < BLKCG_MAX_POLS; i++) {
1313                 struct blkcg_policy *pol = blkcg_policy[i];
1314                 struct blkcg *blkcg;
1315 
1316                 if (!pol || !pol->cpd_bind_fn)
1317                         continue;
1318 
1319                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1320                         if (blkcg->cpd[pol->plid])
1321                                 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1322         }
1323         mutex_unlock(&blkcg_pol_mutex);
1324 }
1325 
1326 static void blkcg_exit(struct task_struct *tsk)
1327 {
1328         if (tsk->throttle_queue)
1329                 blk_put_queue(tsk->throttle_queue);
1330         tsk->throttle_queue = NULL;
1331 }
1332 
1333 struct cgroup_subsys io_cgrp_subsys = {
1334         .css_alloc = blkcg_css_alloc,
1335         .css_offline = blkcg_css_offline,
1336         .css_free = blkcg_css_free,
1337         .can_attach = blkcg_can_attach,
1338         .bind = blkcg_bind,
1339         .dfl_cftypes = blkcg_files,
1340         .legacy_cftypes = blkcg_legacy_files,
1341         .legacy_name = "blkio",
1342         .exit = blkcg_exit,
1343 #ifdef CONFIG_MEMCG
1344         /*
1345          * This ensures that, if available, memcg is automatically enabled
1346          * together on the default hierarchy so that the owner cgroup can
1347          * be retrieved from writeback pages.
1348          */
1349         .depends_on = 1 << memory_cgrp_id,
1350 #endif
1351 };
1352 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1353 
1354 /**
1355  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1356  * @q: request_queue of interest
1357  * @pol: blkcg policy to activate
1358  *
1359  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1360  * bypass mode to populate its blkgs with policy_data for @pol.
1361  *
1362  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1363  * from IO path.  Update of each blkg is protected by both queue and blkcg
1364  * locks so that holding either lock and testing blkcg_policy_enabled() is
1365  * always enough for dereferencing policy data.
1366  *
1367  * The caller is responsible for synchronizing [de]activations and policy
1368  * [un]registrations.  Returns 0 on success, -errno on failure.
1369  */
1370 int blkcg_activate_policy(struct request_queue *q,
1371                           const struct blkcg_policy *pol)
1372 {
1373         struct blkg_policy_data *pd_prealloc = NULL;
1374         struct blkcg_gq *blkg;
1375         int ret;
1376 
1377         if (blkcg_policy_enabled(q, pol))
1378                 return 0;
1379 
1380         if (queue_is_mq(q))
1381                 blk_mq_freeze_queue(q);
1382 pd_prealloc:
1383         if (!pd_prealloc) {
1384                 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1385                 if (!pd_prealloc) {
1386                         ret = -ENOMEM;
1387                         goto out_bypass_end;
1388                 }
1389         }
1390 
1391         spin_lock_irq(&q->queue_lock);
1392 
1393         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1394                 struct blkg_policy_data *pd;
1395 
1396                 if (blkg->pd[pol->plid])
1397                         continue;
1398 
1399                 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1400                 if (!pd)
1401                         swap(pd, pd_prealloc);
1402                 if (!pd) {
1403                         spin_unlock_irq(&q->queue_lock);
1404                         goto pd_prealloc;
1405                 }
1406 
1407                 blkg->pd[pol->plid] = pd;
1408                 pd->blkg = blkg;
1409                 pd->plid = pol->plid;
1410                 if (pol->pd_init_fn)
1411                         pol->pd_init_fn(pd);
1412         }
1413 
1414         __set_bit(pol->plid, q->blkcg_pols);
1415         ret = 0;
1416 
1417         spin_unlock_irq(&q->queue_lock);
1418 out_bypass_end:
1419         if (queue_is_mq(q))
1420                 blk_mq_unfreeze_queue(q);
1421         if (pd_prealloc)
1422                 pol->pd_free_fn(pd_prealloc);
1423         return ret;
1424 }
1425 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1426 
1427 /**
1428  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1429  * @q: request_queue of interest
1430  * @pol: blkcg policy to deactivate
1431  *
1432  * Deactivate @pol on @q.  Follows the same synchronization rules as
1433  * blkcg_activate_policy().
1434  */
1435 void blkcg_deactivate_policy(struct request_queue *q,
1436                              const struct blkcg_policy *pol)
1437 {
1438         struct blkcg_gq *blkg;
1439 
1440         if (!blkcg_policy_enabled(q, pol))
1441                 return;
1442 
1443         if (queue_is_mq(q))
1444                 blk_mq_freeze_queue(q);
1445 
1446         spin_lock_irq(&q->queue_lock);
1447 
1448         __clear_bit(pol->plid, q->blkcg_pols);
1449 
1450         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1451                 if (blkg->pd[pol->plid]) {
1452                         if (pol->pd_offline_fn)
1453                                 pol->pd_offline_fn(blkg->pd[pol->plid]);
1454                         pol->pd_free_fn(blkg->pd[pol->plid]);
1455                         blkg->pd[pol->plid] = NULL;
1456                 }
1457         }
1458 
1459         spin_unlock_irq(&q->queue_lock);
1460 
1461         if (queue_is_mq(q))
1462                 blk_mq_unfreeze_queue(q);
1463 }
1464 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
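
/*
 * Illustrative sketch, not part of the original file: the hypothetical
 * example_policy from the sketches above would typically enable itself on a
 * queue from its per-queue setup path and tear down again on exit.
 */
static int example_init_queue(struct request_queue *q)
{
        /* allocates a pd for every existing blkg and sets the plid bit */
        return blkcg_activate_policy(q, &example_policy);
}

static void example_exit_queue(struct request_queue *q)
{
        /* frees the pd on every blkg and clears the plid bit */
        blkcg_deactivate_policy(q, &example_policy);
}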
1465 
1466 /**
1467  * blkcg_policy_register - register a blkcg policy
1468  * @pol: blkcg policy to register
1469  *
1470  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1471  * successful registration.  Returns 0 on success and -errno on failure.
1472  */
1473 int blkcg_policy_register(struct blkcg_policy *pol)
1474 {
1475         struct blkcg *blkcg;
1476         int i, ret;
1477 
1478         mutex_lock(&blkcg_pol_register_mutex);
1479         mutex_lock(&blkcg_pol_mutex);
1480 
1481         /* find an empty slot */
1482         ret = -ENOSPC;
1483         for (i = 0; i < BLKCG_MAX_POLS; i++)
1484                 if (!blkcg_policy[i])
1485                         break;
1486         if (i >= BLKCG_MAX_POLS) {
1487                 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1488                 goto err_unlock;
1489         }
1490 
1491         /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1492         if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1493                 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1494                 goto err_unlock;
1495 
1496         /* register @pol */
1497         pol->plid = i;
1498         blkcg_policy[pol->plid] = pol;
1499 
1500         /* allocate and install cpd's */
1501         if (pol->cpd_alloc_fn) {
1502                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1503                         struct blkcg_policy_data *cpd;
1504 
1505                         cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1506                         if (!cpd)
1507                                 goto err_free_cpds;
1508 
1509                         blkcg->cpd[pol->plid] = cpd;
1510                         cpd->blkcg = blkcg;
1511                         cpd->plid = pol->plid;
1512                         pol->cpd_init_fn(cpd);
1513                 }
1514         }
1515 
1516         mutex_unlock(&blkcg_pol_mutex);
1517 
1518         /* everything is in place, add intf files for the new policy */
1519         if (pol->dfl_cftypes)
1520                 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1521                                                pol->dfl_cftypes));
1522         if (pol->legacy_cftypes)
1523                 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1524                                                   pol->legacy_cftypes));
1525         mutex_unlock(&blkcg_pol_register_mutex);
1526         return 0;
1527 
1528 err_free_cpds:
1529         if (pol->cpd_free_fn) {
1530                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1531                         if (blkcg->cpd[pol->plid]) {
1532                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1533                                 blkcg->cpd[pol->plid] = NULL;
1534                         }
1535                 }
1536         }
1537         blkcg_policy[pol->plid] = NULL;
1538 err_unlock:
1539         mutex_unlock(&blkcg_pol_mutex);
1540         mutex_unlock(&blkcg_pol_register_mutex);
1541         return ret;
1542 }
1543 EXPORT_SYMBOL_GPL(blkcg_policy_register);
1544 
1545 /**
1546  * blkcg_policy_unregister - unregister a blkcg policy
1547  * @pol: blkcg policy to unregister
1548  *
1549  * Undo blkcg_policy_register(@pol).  Might sleep.
1550  */
1551 void blkcg_policy_unregister(struct blkcg_policy *pol)
1552 {
1553         struct blkcg *blkcg;
1554 
1555         mutex_lock(&blkcg_pol_register_mutex);
1556 
1557         if (WARN_ON(blkcg_policy[pol->plid] != pol))
1558                 goto out_unlock;
1559 
1560         /* kill the intf files first */
1561         if (pol->dfl_cftypes)
1562                 cgroup_rm_cftypes(pol->dfl_cftypes);
1563         if (pol->legacy_cftypes)
1564                 cgroup_rm_cftypes(pol->legacy_cftypes);
1565 
1566         /* remove cpds and unregister */
1567         mutex_lock(&blkcg_pol_mutex);
1568 
1569         if (pol->cpd_free_fn) {
1570                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1571                         if (blkcg->cpd[pol->plid]) {
1572                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1573                                 blkcg->cpd[pol->plid] = NULL;
1574                         }
1575                 }
1576         }
1577         blkcg_policy[pol->plid] = NULL;
1578 
1579         mutex_unlock(&blkcg_pol_mutex);
1580 out_unlock:
1581         mutex_unlock(&blkcg_pol_register_mutex);
1582 }
1583 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
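/*
 * Editor's note: an illustrative sketch (not part of blk-cgroup.c) of a
 * minimal registration/unregistration pairing for the two entry points above,
 * as a module would do it and assuming the usual module headers.  All
 * example_* names and the trivial pd callbacks are hypothetical;
 * blkcg_policy_throtl, blkcg_policy_iolatency and blkcg_policy_bfq are the
 * real in-tree registrants.
 */
struct example_grp {
	struct blkg_policy_data pd;	/* recovered via container_of() */
	u64 some_stat;
};

static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
{
	struct example_grp *eg = kzalloc_node(sizeof(*eg), gfp, node);

	return eg ? &eg->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct example_grp, pd));
}

static struct blkcg_policy example_policy = {
	/* pd_alloc_fn/pd_free_fn must come in pairs (checked at register time) */
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};

static int __init example_init(void)
{
	/* returns -ENOSPC once all BLKCG_MAX_POLS slots are taken */
	return blkcg_policy_register(&example_policy);
}
module_init(example_init);

static void __exit example_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}
module_exit(example_exit);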
1584 
1585 /*
1586  * Scale the accumulated delay based on how long it has been since we last
1587  * updated the delay.  We call this when adding delay, in case it has been a
1588  * while since delay was last added, and when checking whether a task needs
1589  * to be delayed, so the accumulated delay reflects the time that has passed.
1590  */
1591 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1592 {
1593         u64 old = atomic64_read(&blkg->delay_start);
1594 
1595         /*
1596          * We only want to scale down once per second.  The idea is that we
1597          * want to delay tasks for min(delay_nsec, NSEC_PER_SEC) within a given
1598          * time window, i.e. only throttle for delay that accumulated recently,
1599          * in one-second windows, since one second is the longest a task can be
1600          * throttled at a time.  We save the current delay window in
1601          * blkg->last_delay so we know how much is still left to be charged
1602          * to the blkg from this point onward.  blkg->last_use keeps track of
1603          * the use_delay counter.  If we are unthrottling the blkg, we are ok
1604          * with whatever is happening now and can take away more of the
1605          * accumulated delay, as we have already throttled enough that
1606          * everybody is happy with their IO latencies.
1607          */
1608         if (time_before64(old + NSEC_PER_SEC, now) &&
1609             atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1610                 u64 cur = atomic64_read(&blkg->delay_nsec);
1611                 u64 sub = min_t(u64, blkg->last_delay, now - old);
1612                 int cur_use = atomic_read(&blkg->use_delay);
1613 
1614                 /*
1615                  * We've been unthrottled, subtract a larger chunk of our
1616                  * accumulated delay.
1617                  */
1618                 if (cur_use < blkg->last_use)
1619                         sub = max_t(u64, sub, blkg->last_delay >> 1);
1620 
1621                 /*
1622                  * This shouldn't happen, but handle it anyway.  Our delay_nsec
1623                  * should only ever be growing except here where we subtract out
1624                  * min(last_delay, 1 second), but lord knows bugs happen and I'd
1625                  * rather not end up with negative numbers.
1626                  */
1627                 if (unlikely(cur < sub)) {
1628                         atomic64_set(&blkg->delay_nsec, 0);
1629                         blkg->last_delay = 0;
1630                 } else {
1631                         atomic64_sub(sub, &blkg->delay_nsec);
1632                         blkg->last_delay = cur - sub;
1633                 }
1634                 blkg->last_use = cur_use;
1635         }
1636 }
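/*
 * Editor's worked example for the scaling above (numbers purely
 * illustrative): suppose delay_start was last updated 3s ago, delay_nsec is
 * 1.5s and last_delay is 1s.  More than a second has passed, so if the
 * cmpxchg wins, sub = min(last_delay, now - old) = min(1s, 3s) = 1s;
 * delay_nsec drops to 0.5s and last_delay becomes cur - sub = 0.5s.  If
 * use_delay has also dropped since the last scaling (the group was
 * unthrottled), sub is raised to at least last_delay / 2, so the accumulated
 * delay decays faster.
 */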
1637 
1638 /*
1639  * This is called when we want to actually walk up the hierarchy and check to
1640  * see if we need to throttle, and then actually throttle if there is some
1641  * accumulated delay.  This should only be called upon return to user space so
1642  * we're not holding some lock that would induce a priority inversion.
1643  */
1644 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1645 {
1646         u64 now = ktime_to_ns(ktime_get());
1647         u64 exp;
1648         u64 delay_nsec = 0;
1649         int tok;
1650 
1651         while (blkg->parent) {
1652                 if (atomic_read(&blkg->use_delay)) {
1653                         blkcg_scale_delay(blkg, now);
1654                         delay_nsec = max_t(u64, delay_nsec,
1655                                            atomic64_read(&blkg->delay_nsec));
1656                 }
1657                 blkg = blkg->parent;
1658         }
1659 
1660         if (!delay_nsec)
1661                 return;
1662 
1663         /*
1664          * Let's not sleep for all eternity if we've amassed a huge delay.
1665          * Swapping or metadata IO can accumulate 10's of seconds worth of
1666          * delay, and we want userspace to be able to do _something_, so cap the
1667          * delays at 0.25s to match the clamp below.  If there are tens of seconds
1668          * worth of delay then tasks will be delayed for 0.25s on every syscall.
1669          */
1670         delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1671 
1672         /*
1673          * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1674          * that hasn't landed upstream yet.  Once that stuff is in place we need
1675          * to do a psi_memstall_enter/leave if memdelay is set.
1676          */
1677 
1678         exp = ktime_add_ns(now, delay_nsec);
1679         tok = io_schedule_prepare();
1680         do {
1681                 __set_current_state(TASK_KILLABLE);
1682                 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1683                         break;
1684         } while (!fatal_signal_pending(current));
1685         io_schedule_finish(tok);
1686 }
1687 
1688 /**
1689  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1690  *
1691  * This is only called if we've been marked with set_notify_resume().  Obviously
1692  * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1693  * check whether current->throttle_queue is set; if it isn't, this does
1694  * nothing.  This should only ever be called by the resume code; it's not meant
1695  * to be called willy-nilly, as it actually does the work of throttling the
1696  * task if it is set up for throttling.
1697  */
1698 void blkcg_maybe_throttle_current(void)
1699 {
1700         struct request_queue *q = current->throttle_queue;
1701         struct cgroup_subsys_state *css;
1702         struct blkcg *blkcg;
1703         struct blkcg_gq *blkg;
1704         bool use_memdelay = current->use_memdelay;
1705 
1706         if (!q)
1707                 return;
1708 
1709         current->throttle_queue = NULL;
1710         current->use_memdelay = false;
1711 
1712         rcu_read_lock();
1713         css = kthread_blkcg();
1714         if (css)
1715                 blkcg = css_to_blkcg(css);
1716         else
1717                 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1718 
1719         if (!blkcg)
1720                 goto out;
1721         blkg = blkg_lookup(blkcg, q);
1722         if (!blkg)
1723                 goto out;
1724         if (!blkg_tryget(blkg))
1725                 goto out;
1726         rcu_read_unlock();
1727 
1728         blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1729         blkg_put(blkg);
1730         blk_put_queue(q);
1731         return;
1732 out:
1733         rcu_read_unlock();
1734         blk_put_queue(q);
1735 }
1736 
1737 /**
1738  * blkcg_schedule_throttle - this task needs to check for throttling
1739  * @q: the request queue IO was submitted on
1740  * @use_memdelay: do we charge this to memory delay for PSI
1741  *
1742  * This is called by the IO controller when we know there's delay accumulated
1743  * for the blkg for this task.  We do not pass the blkg because there are places
1744  * we call this that may not have that information; the swapping code, for
1745  * instance, only has a request_queue at that point.  This sets notify_resume
1746  * for the task, which then checks whether it requires throttling before
1747  * returning to user space.
1748  *
1749  * We will only schedule once per syscall.  You can call this over and over
1750  * again and it will only do the check once upon return to user space, and only
1751  * throttle once.  If the task needs to be throttled again, it will need to be
1752  * set up again the next time we see it.
1753  */
1754 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1755 {
1756         if (unlikely(current->flags & PF_KTHREAD))
1757                 return;
1758 
1759         if (!blk_get_queue(q))
1760                 return;
1761 
1762         if (current->throttle_queue)
1763                 blk_put_queue(current->throttle_queue);
1764         current->throttle_queue = q;
1765         if (use_memdelay)
1766                 current->use_memdelay = use_memdelay;
1767         set_notify_resume(current);
1768 }
1769 
1770 /**
1771  * blkcg_add_delay - add delay to this blkg
1772  * @now: the current time in nanoseconds
1773  * @delta: how many nanoseconds of delay to add
1774  *
1775  * Charge @delta to the blkg's current delay accumulation.  This is used to
1776  * throttle tasks if an IO controller thinks we need more throttling.
1777  */
1778 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1779 {
1780         blkcg_scale_delay(blkg, now);
1781         atomic64_add(delta, &blkg->delay_nsec);
1782 }
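/*
 * Editor's note: an illustrative sketch (not part of blk-cgroup.c) of how an
 * IO controller is expected to combine the two calls above: charge a penalty
 * to the group's accumulated delay with blkcg_add_delay(), then arm the
 * return-to-userspace check for the submitting task with
 * blkcg_schedule_throttle().  example_penalize() and penalty_nsec are
 * hypothetical; blk-iolatency is the in-tree user of this pattern.  Passing
 * true as the second argument would additionally mark the delay as memory
 * stall, per the PSI TODO above.
 */
static void example_penalize(struct blkcg_gq *blkg, struct request_queue *q,
			     u64 penalty_nsec)
{
	u64 now = ktime_to_ns(ktime_get());

	/* charge the penalty to the group's accumulated delay */
	blkcg_add_delay(blkg, now, penalty_nsec);
	/* have the submitting task check for throttling on resume */
	blkcg_schedule_throttle(q, false);
}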
1783 
1784 module_param(blkcg_debug_stats, bool, 0644);
1785 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1786 
