
TOMOYO Linux Cross Reference
Linux/block/blk-cgroup.c


  1 /*
  2  * Common Block IO controller cgroup interface
  3  *
  4  * Based on ideas and code from CFQ, CFS and BFQ:
  5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6  *
  7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8  *                    Paolo Valente <paolo.valente@unimore.it>
  9  *
 10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 11  *                    Nauman Rafique <nauman@google.com>
 12  *
 13  * For policy-specific per-blkcg data:
 14  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 15  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 16  */
 17 #include <linux/ioprio.h>
 18 #include <linux/kdev_t.h>
 19 #include <linux/module.h>
 20 #include <linux/sched/signal.h>
 21 #include <linux/err.h>
 22 #include <linux/blkdev.h>
 23 #include <linux/backing-dev.h>
 24 #include <linux/slab.h>
 25 #include <linux/genhd.h>
 26 #include <linux/delay.h>
 27 #include <linux/atomic.h>
 28 #include <linux/ctype.h>
 29 #include <linux/blk-cgroup.h>
 30 #include <linux/tracehook.h>
 31 #include "blk.h"
 32 
 33 #define MAX_KEY_LEN 100
 34 
 35 /*
 36  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 37  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 38  * policy [un]register operations including cgroup file additions /
 39  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 40  * allows grabbing it from cgroup callbacks.
 41  */
 42 static DEFINE_MUTEX(blkcg_pol_register_mutex);
 43 static DEFINE_MUTEX(blkcg_pol_mutex);
 44 
 45 struct blkcg blkcg_root;
 46 EXPORT_SYMBOL_GPL(blkcg_root);
 47 
 48 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 49 
 50 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 51 
 52 static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
 53 
 54 static bool blkcg_debug_stats = false;
 55 
 56 static bool blkcg_policy_enabled(struct request_queue *q,
 57                                  const struct blkcg_policy *pol)
 58 {
 59         return pol && test_bit(pol->plid, q->blkcg_pols);
 60 }
 61 
 62 /**
 63  * blkg_free - free a blkg
 64  * @blkg: blkg to free
 65  *
 66  * Free @blkg which may be partially allocated.
 67  */
 68 static void blkg_free(struct blkcg_gq *blkg)
 69 {
 70         int i;
 71 
 72         if (!blkg)
 73                 return;
 74 
 75         for (i = 0; i < BLKCG_MAX_POLS; i++)
 76                 if (blkg->pd[i])
 77                         blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 78 
 79         if (blkg->blkcg != &blkcg_root)
 80                 blk_exit_rl(blkg->q, &blkg->rl);
 81 
 82         blkg_rwstat_exit(&blkg->stat_ios);
 83         blkg_rwstat_exit(&blkg->stat_bytes);
 84         kfree(blkg);
 85 }
 86 
 87 /**
 88  * blkg_alloc - allocate a blkg
 89  * @blkcg: block cgroup the new blkg is associated with
 90  * @q: request_queue the new blkg is associated with
 91  * @gfp_mask: allocation mask to use
 92  *
 93  * Allocate a new blkg associating @blkcg and @q.
 94  */
 95 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 96                                    gfp_t gfp_mask)
 97 {
 98         struct blkcg_gq *blkg;
 99         int i;
100 
101         /* alloc and init base part */
102         blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
103         if (!blkg)
104                 return NULL;
105 
106         if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
107             blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
108                 goto err_free;
109 
110         blkg->q = q;
111         INIT_LIST_HEAD(&blkg->q_node);
112         blkg->blkcg = blkcg;
113         atomic_set(&blkg->refcnt, 1);
114 
115         /* root blkg uses @q->root_rl, init rl only for !root blkgs */
116         if (blkcg != &blkcg_root) {
117                 if (blk_init_rl(&blkg->rl, q, gfp_mask))
118                         goto err_free;
119                 blkg->rl.blkg = blkg;
120         }
121 
122         for (i = 0; i < BLKCG_MAX_POLS; i++) {
123                 struct blkcg_policy *pol = blkcg_policy[i];
124                 struct blkg_policy_data *pd;
125 
126                 if (!blkcg_policy_enabled(q, pol))
127                         continue;
128 
129                 /* alloc per-policy data and attach it to blkg */
130                 pd = pol->pd_alloc_fn(gfp_mask, q->node);
131                 if (!pd)
132                         goto err_free;
133 
134                 blkg->pd[i] = pd;
135                 pd->blkg = blkg;
136                 pd->plid = i;
137         }
138 
139         return blkg;
140 
141 err_free:
142         blkg_free(blkg);
143         return NULL;
144 }
145 
146 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
147                                       struct request_queue *q, bool update_hint)
148 {
149         struct blkcg_gq *blkg;
150 
151         /*
152          * Hint didn't match.  Look up from the radix tree.  Note that the
153          * hint can only be updated under queue_lock as otherwise @blkg
154          * could have already been removed from blkg_tree.  The caller is
155          * responsible for grabbing queue_lock if @update_hint.
156          */
157         blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
158         if (blkg && blkg->q == q) {
159                 if (update_hint) {
160                         lockdep_assert_held(q->queue_lock);
161                         rcu_assign_pointer(blkcg->blkg_hint, blkg);
162                 }
163                 return blkg;
164         }
165 
166         return NULL;
167 }
168 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
169 
170 /*
171  * If @new_blkg is %NULL, this function tries to allocate a new one as
172  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
173  */
174 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
175                                     struct request_queue *q,
176                                     struct blkcg_gq *new_blkg)
177 {
178         struct blkcg_gq *blkg;
179         struct bdi_writeback_congested *wb_congested;
180         int i, ret;
181 
182         WARN_ON_ONCE(!rcu_read_lock_held());
183         lockdep_assert_held(q->queue_lock);
184 
185         /* blkg holds a reference to blkcg */
186         if (!css_tryget_online(&blkcg->css)) {
187                 ret = -ENODEV;
188                 goto err_free_blkg;
189         }
190 
191         wb_congested = wb_congested_get_create(q->backing_dev_info,
192                                                blkcg->css.id,
193                                                GFP_NOWAIT | __GFP_NOWARN);
194         if (!wb_congested) {
195                 ret = -ENOMEM;
196                 goto err_put_css;
197         }
198 
199         /* allocate */
200         if (!new_blkg) {
201                 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
202                 if (unlikely(!new_blkg)) {
203                         ret = -ENOMEM;
204                         goto err_put_congested;
205                 }
206         }
207         blkg = new_blkg;
208         blkg->wb_congested = wb_congested;
209 
210         /* link parent */
211         if (blkcg_parent(blkcg)) {
212                 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
213                 if (WARN_ON_ONCE(!blkg->parent)) {
214                         ret = -ENODEV;
215                         goto err_put_congested;
216                 }
217                 blkg_get(blkg->parent);
218         }
219 
220         /* invoke per-policy init */
221         for (i = 0; i < BLKCG_MAX_POLS; i++) {
222                 struct blkcg_policy *pol = blkcg_policy[i];
223 
224                 if (blkg->pd[i] && pol->pd_init_fn)
225                         pol->pd_init_fn(blkg->pd[i]);
226         }
227 
228         /* insert */
229         spin_lock(&blkcg->lock);
230         ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
231         if (likely(!ret)) {
232                 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
233                 list_add(&blkg->q_node, &q->blkg_list);
234 
235                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
236                         struct blkcg_policy *pol = blkcg_policy[i];
237 
238                         if (blkg->pd[i] && pol->pd_online_fn)
239                                 pol->pd_online_fn(blkg->pd[i]);
240                 }
241         }
242         blkg->online = true;
243         spin_unlock(&blkcg->lock);
244 
245         if (!ret)
246                 return blkg;
247 
248         /* @blkg failed to be fully initialized, use the usual release path */
249         blkg_put(blkg);
250         return ERR_PTR(ret);
251 
252 err_put_congested:
253         wb_congested_put(wb_congested);
254 err_put_css:
255         css_put(&blkcg->css);
256 err_free_blkg:
257         blkg_free(new_blkg);
258         return ERR_PTR(ret);
259 }
260 
261 /**
262  * blkg_lookup_create - lookup blkg, try to create one if not there
263  * @blkcg: blkcg of interest
264  * @q: request_queue of interest
265  *
266  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
267  * create one.  blkg creation is performed recursively from blkcg_root such
268  * that all non-root blkg's have access to the parent blkg.  This function
269  * should be called under RCU read lock and @q->queue_lock.
270  *
271  * Returns pointer to the looked up or created blkg on success, ERR_PTR()
272  * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
273  * dead and bypassing, returns ERR_PTR(-EBUSY).
274  */
275 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
276                                     struct request_queue *q)
277 {
278         struct blkcg_gq *blkg;
279 
280         WARN_ON_ONCE(!rcu_read_lock_held());
281         lockdep_assert_held(q->queue_lock);
282 
283         /*
284          * This could be the first entry point of blkcg implementation and
285          * we shouldn't allow anything to go through for a bypassing queue.
286          */
287         if (unlikely(blk_queue_bypass(q)))
288                 return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
289 
290         blkg = __blkg_lookup(blkcg, q, true);
291         if (blkg)
292                 return blkg;
293 
294         /*
295          * Create blkgs walking down from blkcg_root to @blkcg, so that all
296          * non-root blkgs have access to their parents.
297          */
298         while (true) {
299                 struct blkcg *pos = blkcg;
300                 struct blkcg *parent = blkcg_parent(blkcg);
301 
302                 while (parent && !__blkg_lookup(parent, q, false)) {
303                         pos = parent;
304                         parent = blkcg_parent(parent);
305                 }
306 
307                 blkg = blkg_create(pos, q, NULL);
308                 if (pos == blkcg || IS_ERR(blkg))
309                         return blkg;
310         }
311 }
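
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * on the IO path typically uses blkg_lookup_create() under the RCU read
 * lock and @q->queue_lock, falling back to the root blkg on failure
 * (mirroring blk-throttle).  The function name is hypothetical.
 */
static struct blkcg_gq *example_lookup_blkg(struct blkcg *blkcg,
                                            struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* use the per-cgroup blkg when possible, the root blkg otherwise */
        blkg = blkg_lookup_create(blkcg, q);
        if (IS_ERR(blkg))
                blkg = q->root_blkg;
        return blkg;
}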
312 
313 static void blkg_destroy(struct blkcg_gq *blkg)
314 {
315         struct blkcg *blkcg = blkg->blkcg;
316         struct blkcg_gq *parent = blkg->parent;
317         int i;
318 
319         lockdep_assert_held(blkg->q->queue_lock);
320         lockdep_assert_held(&blkcg->lock);
321 
322         /* Something is wrong if we are trying to remove the same group twice */
323         WARN_ON_ONCE(list_empty(&blkg->q_node));
324         WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
325 
326         for (i = 0; i < BLKCG_MAX_POLS; i++) {
327                 struct blkcg_policy *pol = blkcg_policy[i];
328 
329                 if (blkg->pd[i] && pol->pd_offline_fn)
330                         pol->pd_offline_fn(blkg->pd[i]);
331         }
332 
333         if (parent) {
334                 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
335                 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
336         }
337 
338         blkg->online = false;
339 
340         radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
341         list_del_init(&blkg->q_node);
342         hlist_del_init_rcu(&blkg->blkcg_node);
343 
344         /*
345          * Both setting lookup hint to and clearing it from @blkg are done
346          * under queue_lock.  If it's not pointing to @blkg now, it never
347          * will.  Hint assignment itself can race safely.
348          */
349         if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
350                 rcu_assign_pointer(blkcg->blkg_hint, NULL);
351 
352         /*
353          * Put the reference taken at the time of creation so that when all
354          * queues are gone, group can be destroyed.
355          */
356         blkg_put(blkg);
357 }
358 
359 /**
360  * blkg_destroy_all - destroy all blkgs associated with a request_queue
361  * @q: request_queue of interest
362  *
363  * Destroy all blkgs associated with @q.
364  */
365 static void blkg_destroy_all(struct request_queue *q)
366 {
367         struct blkcg_gq *blkg, *n;
368 
369         lockdep_assert_held(q->queue_lock);
370 
371         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
372                 struct blkcg *blkcg = blkg->blkcg;
373 
374                 spin_lock(&blkcg->lock);
375                 blkg_destroy(blkg);
376                 spin_unlock(&blkcg->lock);
377         }
378 
379         q->root_blkg = NULL;
380         q->root_rl.blkg = NULL;
381 }
382 
383 /*
384  * A group is RCU protected, but having an rcu lock does not mean that one
385  * can access all the fields of blkg and assume these are valid.  For
386  * example, don't try to follow throtl_data and request queue links.
387  *
388  * Having a reference to blkg under RCU allows access only to values
389  * local to the group, like group stats and group rate limits.
390  */
391 void __blkg_release_rcu(struct rcu_head *rcu_head)
392 {
393         struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
394 
395         /* release the blkcg and parent blkg refs this blkg has been holding */
396         css_put(&blkg->blkcg->css);
397         if (blkg->parent)
398                 blkg_put(blkg->parent);
399 
400         wb_congested_put(blkg->wb_congested);
401 
402         blkg_free(blkg);
403 }
404 EXPORT_SYMBOL_GPL(__blkg_release_rcu);
405 
406 /*
407  * The 'next' function used by blk_queue_for_each_rl().  It's a bit tricky
408  * because the root blkg uses @q->root_rl instead of its own rl.
409  */
410 struct request_list *__blk_queue_next_rl(struct request_list *rl,
411                                          struct request_queue *q)
412 {
413         struct list_head *ent;
414         struct blkcg_gq *blkg;
415 
416         /*
417          * Determine the current blkg list_head.  The first entry is
418          * root_rl which is off @q->blkg_list and mapped to the head.
419          */
420         if (rl == &q->root_rl) {
421                 ent = &q->blkg_list;
422                 /* There are no more block groups, hence no request lists */
423                 if (list_empty(ent))
424                         return NULL;
425         } else {
426                 blkg = container_of(rl, struct blkcg_gq, rl);
427                 ent = &blkg->q_node;
428         }
429 
430         /* walk to the next list_head, skip root blkcg */
431         ent = ent->next;
432         if (ent == &q->root_blkg->q_node)
433                 ent = ent->next;
434         if (ent == &q->blkg_list)
435                 return NULL;
436 
437         blkg = container_of(ent, struct blkcg_gq, q_node);
438         return &blkg->rl;
439 }
440 
441 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
442                              struct cftype *cftype, u64 val)
443 {
444         struct blkcg *blkcg = css_to_blkcg(css);
445         struct blkcg_gq *blkg;
446         int i;
447 
448         mutex_lock(&blkcg_pol_mutex);
449         spin_lock_irq(&blkcg->lock);
450 
451         /*
452          * Note that stat reset is racy - it doesn't synchronize against
453          * stat updates.  This is a debug feature which shouldn't exist
454          * anyway.  If you get hit by a race, retry.
455          */
456         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
457                 blkg_rwstat_reset(&blkg->stat_bytes);
458                 blkg_rwstat_reset(&blkg->stat_ios);
459 
460                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
461                         struct blkcg_policy *pol = blkcg_policy[i];
462 
463                         if (blkg->pd[i] && pol->pd_reset_stats_fn)
464                                 pol->pd_reset_stats_fn(blkg->pd[i]);
465                 }
466         }
467 
468         spin_unlock_irq(&blkcg->lock);
469         mutex_unlock(&blkcg_pol_mutex);
470         return 0;
471 }
472 
473 const char *blkg_dev_name(struct blkcg_gq *blkg)
474 {
475         /* some drivers (floppy) instantiate a queue w/o disk registered */
476         if (blkg->q->backing_dev_info->dev)
477                 return dev_name(blkg->q->backing_dev_info->dev);
478         return NULL;
479 }
480 EXPORT_SYMBOL_GPL(blkg_dev_name);
481 
482 /**
483  * blkcg_print_blkgs - helper for printing per-blkg data
484  * @sf: seq_file to print to
485  * @blkcg: blkcg of interest
486  * @prfill: fill function to print out a blkg
487  * @pol: policy in question
488  * @data: data to be passed to @prfill
489  * @show_total: to print out sum of prfill return values or not
490  *
491  * This function invokes @prfill on each blkg of @blkcg if pd for the
492  * policy specified by @pol exists.  @prfill is invoked with @sf, the
493  * policy data and @data and the matching queue lock held.  If @show_total
494  * is %true, the sum of the return values from @prfill is printed with
495  * "Total" label at the end.
496  *
497  * This is to be used to construct print functions for
498  * cftype->seq_show method.
499  */
500 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
501                        u64 (*prfill)(struct seq_file *,
502                                      struct blkg_policy_data *, int),
503                        const struct blkcg_policy *pol, int data,
504                        bool show_total)
505 {
506         struct blkcg_gq *blkg;
507         u64 total = 0;
508 
509         rcu_read_lock();
510         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
511                 spin_lock_irq(blkg->q->queue_lock);
512                 if (blkcg_policy_enabled(blkg->q, pol))
513                         total += prfill(sf, blkg->pd[pol->plid], data);
514                 spin_unlock_irq(blkg->q->queue_lock);
515         }
516         rcu_read_unlock();
517 
518         if (show_total)
519                 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
520 }
521 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
522 
523 /**
524  * __blkg_prfill_u64 - prfill helper for a single u64 value
525  * @sf: seq_file to print to
526  * @pd: policy private data of interest
527  * @v: value to print
528  *
529  * Print @v to @sf for the device associated with @pd.
530  */
531 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
532 {
533         const char *dname = blkg_dev_name(pd->blkg);
534 
535         if (!dname)
536                 return 0;
537 
538         seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
539         return v;
540 }
541 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
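
/*
 * Illustrative usage sketch, not part of the original file: a policy-side
 * seq_show callback built from blkcg_print_blkgs() and __blkg_prfill_u64().
 * "struct demo_pd", "demo_prfill" and "demo_print_weight" are hypothetical;
 * the cftype's ->private is assumed to point to the owning blkcg_policy,
 * as with blkg_print_stat_bytes() below.
 */
struct demo_pd {
        struct blkg_policy_data pd;     /* embedded first, as real policies do */
        u64 weight;
};

static u64 demo_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct demo_pd *dpd = container_of(pd, struct demo_pd, pd);

        return __blkg_prfill_u64(sf, pd, dpd->weight);  /* @off unused here */
}

static int demo_print_weight(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), demo_prfill,
                          (void *)seq_cft(sf)->private, 0, false);
        return 0;
}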
542 
543 /**
544  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
545  * @sf: seq_file to print to
546  * @pd: policy private data of interest
547  * @rwstat: rwstat to print
548  *
549  * Print @rwstat to @sf for the device associated with @pd.
550  */
551 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
552                          const struct blkg_rwstat *rwstat)
553 {
554         static const char *rwstr[] = {
555                 [BLKG_RWSTAT_READ]      = "Read",
556                 [BLKG_RWSTAT_WRITE]     = "Write",
557                 [BLKG_RWSTAT_SYNC]      = "Sync",
558                 [BLKG_RWSTAT_ASYNC]     = "Async",
559                 [BLKG_RWSTAT_DISCARD]   = "Discard",
560         };
561         const char *dname = blkg_dev_name(pd->blkg);
562         u64 v;
563         int i;
564 
565         if (!dname)
566                 return 0;
567 
568         for (i = 0; i < BLKG_RWSTAT_NR; i++)
569                 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
570                            (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
571 
572         v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
573                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
574                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
575         seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
576         return v;
577 }
578 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
579 
580 /**
581  * blkg_prfill_stat - prfill callback for blkg_stat
582  * @sf: seq_file to print to
583  * @pd: policy private data of interest
584  * @off: offset to the blkg_stat in @pd
585  *
586  * prfill callback for printing a blkg_stat.
587  */
588 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
589 {
590         return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
591 }
592 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
593 
594 /**
595  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
596  * @sf: seq_file to print to
597  * @pd: policy private data of interest
598  * @off: offset to the blkg_rwstat in @pd
599  *
600  * prfill callback for printing a blkg_rwstat.
601  */
602 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
603                        int off)
604 {
605         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
606 
607         return __blkg_prfill_rwstat(sf, pd, &rwstat);
608 }
609 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
610 
611 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
612                                     struct blkg_policy_data *pd, int off)
613 {
614         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
615 
616         return __blkg_prfill_rwstat(sf, pd, &rwstat);
617 }
618 
619 /**
620  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
621  * @sf: seq_file to print to
622  * @v: unused
623  *
624  * To be used as cftype->seq_show to print blkg->stat_bytes.
625  * cftype->private must be set to the blkcg_policy.
626  */
627 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
628 {
629         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
630                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
631                           offsetof(struct blkcg_gq, stat_bytes), true);
632         return 0;
633 }
634 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
635 
636 /**
637  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
638  * @sf: seq_file to print to
639  * @v: unused
640  *
641  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
642  * must be set to the blkcg_policy.
643  */
644 int blkg_print_stat_ios(struct seq_file *sf, void *v)
645 {
646         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
647                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
648                           offsetof(struct blkcg_gq, stat_ios), true);
649         return 0;
650 }
651 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
652 
653 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
654                                               struct blkg_policy_data *pd,
655                                               int off)
656 {
657         struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
658                                                               NULL, off);
659         return __blkg_prfill_rwstat(sf, pd, &rwstat);
660 }
661 
662 /**
663  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
664  * @sf: seq_file to print to
665  * @v: unused
666  */
667 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
668 {
669         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
670                           blkg_prfill_rwstat_field_recursive,
671                           (void *)seq_cft(sf)->private,
672                           offsetof(struct blkcg_gq, stat_bytes), true);
673         return 0;
674 }
675 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
676 
677 /**
678  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
679  * @sf: seq_file to print to
680  * @v: unused
681  */
682 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
683 {
684         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
685                           blkg_prfill_rwstat_field_recursive,
686                           (void *)seq_cft(sf)->private,
687                           offsetof(struct blkcg_gq, stat_ios), true);
688         return 0;
689 }
690 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
691 
692 /**
693  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
694  * @blkg: blkg of interest
695  * @pol: blkcg_policy which contains the blkg_stat
696  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
697  *
698  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
699  * online descendants and their aux counts.  The caller must be holding the
700  * queue lock for online tests.
701  *
702  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
703  * at @off bytes into @blkg's blkg_policy_data of the policy.
704  */
705 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
706                             struct blkcg_policy *pol, int off)
707 {
708         struct blkcg_gq *pos_blkg;
709         struct cgroup_subsys_state *pos_css;
710         u64 sum = 0;
711 
712         lockdep_assert_held(blkg->q->queue_lock);
713 
714         rcu_read_lock();
715         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
716                 struct blkg_stat *stat;
717 
718                 if (!pos_blkg->online)
719                         continue;
720 
721                 if (pol)
722                         stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
723                 else
724                         stat = (void *)blkg + off;
725 
726                 sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
727         }
728         rcu_read_unlock();
729 
730         return sum;
731 }
732 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
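
/*
 * Illustrative usage sketch, not part of the original file: a recursive
 * prfill callback built on blkg_stat_recursive_sum(), analogous to
 * blkg_prfill_stat() above.  "demo_prfill_stat_recursive" and
 * "blkcg_policy_demo" are hypothetical; the blkg_stat is assumed to live
 * at @off inside the policy's blkg_policy_data.
 */
static u64 demo_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd->blkg, &blkcg_policy_demo, off);

        return __blkg_prfill_u64(sf, pd, sum);
}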
733 
734 /**
735  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
736  * @blkg: blkg of interest
737  * @pol: blkcg_policy which contains the blkg_rwstat
738  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
739  *
740  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
741  * online descendants and their aux counts.  The caller must be holding the
742  * queue lock for online tests.
743  *
744  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
745  * is at @off bytes into @blkg's blkg_policy_data of the policy.
746  */
747 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
748                                              struct blkcg_policy *pol, int off)
749 {
750         struct blkcg_gq *pos_blkg;
751         struct cgroup_subsys_state *pos_css;
752         struct blkg_rwstat sum = { };
753         int i;
754 
755         lockdep_assert_held(blkg->q->queue_lock);
756 
757         rcu_read_lock();
758         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
759                 struct blkg_rwstat *rwstat;
760 
761                 if (!pos_blkg->online)
762                         continue;
763 
764                 if (pol)
765                         rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
766                 else
767                         rwstat = (void *)pos_blkg + off;
768 
769                 for (i = 0; i < BLKG_RWSTAT_NR; i++)
770                         atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
771                                 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
772                                 &sum.aux_cnt[i]);
773         }
774         rcu_read_unlock();
775 
776         return sum;
777 }
778 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
779 
780 /* Performs queue bypass and policy enabled checks then looks up blkg. */
781 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
782                                           const struct blkcg_policy *pol,
783                                           struct request_queue *q)
784 {
785         WARN_ON_ONCE(!rcu_read_lock_held());
786         lockdep_assert_held(q->queue_lock);
787 
788         if (!blkcg_policy_enabled(q, pol))
789                 return ERR_PTR(-EOPNOTSUPP);
790 
791         /*
792          * This could be the first entry point of blkcg implementation and
793          * we shouldn't allow anything to go through for a bypassing queue.
794          */
795         if (unlikely(blk_queue_bypass(q)))
796                 return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
797 
798         return __blkg_lookup(blkcg, q, true /* update_hint */);
799 }
800 
801 /**
802  * blkg_conf_prep - parse and prepare for per-blkg config update
803  * @blkcg: target block cgroup
804  * @pol: target policy
805  * @input: input string
806  * @ctx: blkg_conf_ctx to be filled
807  *
808  * Parse per-blkg config update from @input and initialize @ctx with the
809  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
810  * part of @input following MAJ:MIN.  This function returns with RCU read
811  * lock and queue lock held and must be paired with blkg_conf_finish().
812  */
813 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
814                    char *input, struct blkg_conf_ctx *ctx)
815         __acquires(rcu) __acquires(disk->queue->queue_lock)
816 {
817         struct gendisk *disk;
818         struct request_queue *q;
819         struct blkcg_gq *blkg;
820         unsigned int major, minor;
821         int key_len, part, ret;
822         char *body;
823 
824         if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
825                 return -EINVAL;
826 
827         body = input + key_len;
828         if (!isspace(*body))
829                 return -EINVAL;
830         body = skip_spaces(body);
831 
832         disk = get_gendisk(MKDEV(major, minor), &part);
833         if (!disk)
834                 return -ENODEV;
835         if (part) {
836                 ret = -ENODEV;
837                 goto fail;
838         }
839 
840         q = disk->queue;
841 
842         rcu_read_lock();
843         spin_lock_irq(q->queue_lock);
844 
845         blkg = blkg_lookup_check(blkcg, pol, q);
846         if (IS_ERR(blkg)) {
847                 ret = PTR_ERR(blkg);
848                 goto fail_unlock;
849         }
850 
851         if (blkg)
852                 goto success;
853 
854         /*
855          * Create blkgs walking down from blkcg_root to @blkcg, so that all
856          * non-root blkgs have access to their parents.
857          */
858         while (true) {
859                 struct blkcg *pos = blkcg;
860                 struct blkcg *parent;
861                 struct blkcg_gq *new_blkg;
862 
863                 parent = blkcg_parent(blkcg);
864                 while (parent && !__blkg_lookup(parent, q, false)) {
865                         pos = parent;
866                         parent = blkcg_parent(parent);
867                 }
868 
869                 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
870                 spin_unlock_irq(q->queue_lock);
871                 rcu_read_unlock();
872 
873                 new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
874                 if (unlikely(!new_blkg)) {
875                         ret = -ENOMEM;
876                         goto fail;
877                 }
878 
879                 rcu_read_lock();
880                 spin_lock_irq(q->queue_lock);
881 
882                 blkg = blkg_lookup_check(pos, pol, q);
883                 if (IS_ERR(blkg)) {
884                         ret = PTR_ERR(blkg);
885                         goto fail_unlock;
886                 }
887 
888                 if (blkg) {
889                         blkg_free(new_blkg);
890                 } else {
891                         blkg = blkg_create(pos, q, new_blkg);
892                         if (unlikely(IS_ERR(blkg))) {
893                                 ret = PTR_ERR(blkg);
894                                 goto fail_unlock;
895                         }
896                 }
897 
898                 if (pos == blkcg)
899                         goto success;
900         }
901 success:
902         ctx->disk = disk;
903         ctx->blkg = blkg;
904         ctx->body = body;
905         return 0;
906 
907 fail_unlock:
908         spin_unlock_irq(q->queue_lock);
909         rcu_read_unlock();
910 fail:
911         put_disk_and_module(disk);
912         /*
913          * If queue was bypassing, we should retry.  Do so after a
914          * short msleep().  It isn't strictly necessary but queue
915          * can be bypassing for some time and it's always nice to
916          * avoid busy looping.
917          */
918         if (ret == -EBUSY) {
919                 msleep(10);
920                 ret = restart_syscall();
921         }
922         return ret;
923 }
924 EXPORT_SYMBOL_GPL(blkg_conf_prep);
925 
926 /**
927  * blkg_conf_finish - finish up per-blkg config update
928  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
929  *
930  * Finish up after per-blkg config update.  This function must be paired
931  * with blkg_conf_prep().
932  */
933 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
934         __releases(ctx->disk->queue->queue_lock) __releases(rcu)
935 {
936         spin_unlock_irq(ctx->disk->queue->queue_lock);
937         rcu_read_unlock();
938         put_disk_and_module(ctx->disk);
939 }
940 EXPORT_SYMBOL_GPL(blkg_conf_finish);
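
/*
 * Illustrative usage sketch, not part of the original file: the usual
 * pairing of blkg_conf_prep() and blkg_conf_finish() in a policy's cftype
 * write handler, as done by e.g. blk-throttle.  "demo_set_limit" and
 * "blkcg_policy_demo" are hypothetical; blkg_conf_prep() consumes the
 * MAJ:MIN prefix of @buf and leaves the rest in ctx.body.
 */
static ssize_t demo_set_limit(struct kernfs_open_file *of, char *buf,
                              size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        unsigned long long v;
        int ret;

        /* returns with the RCU read lock and queue lock held on success */
        ret = blkg_conf_prep(blkcg, &blkcg_policy_demo, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) == 1) {
                /* ... update ctx.blkg's per-policy data with @v ... */
                ret = 0;
        }

        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}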
941 
942 static int blkcg_print_stat(struct seq_file *sf, void *v)
943 {
944         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
945         struct blkcg_gq *blkg;
946 
947         rcu_read_lock();
948 
949         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
950                 const char *dname;
951                 char *buf;
952                 struct blkg_rwstat rwstat;
953                 u64 rbytes, wbytes, rios, wios, dbytes, dios;
954                 size_t size = seq_get_buf(sf, &buf), off = 0;
955                 int i;
956                 bool has_stats = false;
957 
958                 dname = blkg_dev_name(blkg);
959                 if (!dname)
960                         continue;
961 
962                 /*
963                  * Hooray string manipulation, count is the size written NOT
964                  * INCLUDING THE \0, so size is now count+1 less than what we
965                  * had before, but we want to start writing the next bit from
966                  * the \0 so we only add count to buf.
967                  */
968                 off += scnprintf(buf+off, size-off, "%s ", dname);
969 
970                 spin_lock_irq(blkg->q->queue_lock);
971 
972                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
973                                         offsetof(struct blkcg_gq, stat_bytes));
974                 rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
975                 wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
976                 dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
977 
978                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
979                                         offsetof(struct blkcg_gq, stat_ios));
980                 rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
981                 wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
982                 dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
983 
984                 spin_unlock_irq(blkg->q->queue_lock);
985 
986                 if (rbytes || wbytes || rios || wios) {
987                         has_stats = true;
988                         off += scnprintf(buf+off, size-off,
989                                          "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
990                                          rbytes, wbytes, rios, wios,
991                                          dbytes, dios);
992                 }
993 
994                 if (!blkcg_debug_stats)
995                         goto next;
996 
997                 if (atomic_read(&blkg->use_delay)) {
998                         has_stats = true;
999                         off += scnprintf(buf+off, size-off,
1000                                          " use_delay=%d delay_nsec=%llu",
1001                                          atomic_read(&blkg->use_delay),
1002                                         (unsigned long long)atomic64_read(&blkg->delay_nsec));
1003                 }
1004 
1005                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
1006                         struct blkcg_policy *pol = blkcg_policy[i];
1007                         size_t written;
1008 
1009                         if (!blkg->pd[i] || !pol->pd_stat_fn)
1010                                 continue;
1011 
1012                         written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
1013                         if (written)
1014                                 has_stats = true;
1015                         off += written;
1016                 }
1017 next:
1018                 if (has_stats) {
1019                         off += scnprintf(buf+off, size-off, "\n");
1020                         seq_commit(sf, off);
1021                 }
1022         }
1023 
1024         rcu_read_unlock();
1025         return 0;
1026 }
1027 
1028 static struct cftype blkcg_files[] = {
1029         {
1030                 .name = "stat",
1031                 .flags = CFTYPE_NOT_ON_ROOT,
1032                 .seq_show = blkcg_print_stat,
1033         },
1034         { }     /* terminate */
1035 };
1036 
1037 static struct cftype blkcg_legacy_files[] = {
1038         {
1039                 .name = "reset_stats",
1040                 .write_u64 = blkcg_reset_stats,
1041         },
1042         { }     /* terminate */
1043 };
1044 
1045 /*
1046  * blkcg destruction is a three-stage process.
1047  *
1048  * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1049  *    which offlines writeback.  Here we tie the next stage of blkg destruction
1050  *    to the completion of writeback associated with the blkcg.  This lets us
1051  *    avoid punting potentially large amounts of outstanding writeback to root
1052  *    while maintaining any ongoing policies.  The next stage is triggered when
1053  *    the nr_cgwbs count goes to zero.
1054  *
1055  * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1056  *    and handles the destruction of blkgs.  Here the css reference held by
1057  *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1058  *    This work may occur in cgwb_release_workfn() on the cgwb_release
1059  *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1060  *    punted to the root_blkg.
1061  *
1062  * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1063  *    This finally frees the blkcg.
1064  */
1065 
1066 /**
1067  * blkcg_css_offline - cgroup css_offline callback
1068  * @css: css of interest
1069  *
1070  * This function is called when @css is about to go away.  Here the cgwbs are
1071  * offlined first and only once writeback associated with the blkcg has
1072  * finished do we start step 2 (see above).
1073  */
1074 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1075 {
1076         struct blkcg *blkcg = css_to_blkcg(css);
1077 
1078         /* this prevents anyone from attaching or migrating to this blkcg */
1079         wb_blkcg_offline(blkcg);
1080 
1081         /* put the base cgwb reference allowing step 2 to be triggered */
1082         blkcg_cgwb_put(blkcg);
1083 }
1084 
1085 /**
1086  * blkcg_destroy_blkgs - responsible for shooting down blkgs
1087  * @blkcg: blkcg of interest
1088  *
1089  * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1090  * is nested inside q lock, this function performs reverse double lock dancing.
1091  * Destroying the blkgs releases the reference held on the blkcg's css allowing
1092  * blkcg_css_free to eventually be called.
1093  *
1094  * This is the blkcg counterpart of ioc_release_fn().
1095  */
1096 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1097 {
1098         spin_lock_irq(&blkcg->lock);
1099 
1100         while (!hlist_empty(&blkcg->blkg_list)) {
1101                 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1102                                                 struct blkcg_gq, blkcg_node);
1103                 struct request_queue *q = blkg->q;
1104 
1105                 if (spin_trylock(q->queue_lock)) {
1106                         blkg_destroy(blkg);
1107                         spin_unlock(q->queue_lock);
1108                 } else {
1109                         spin_unlock_irq(&blkcg->lock);
1110                         cpu_relax();
1111                         spin_lock_irq(&blkcg->lock);
1112                 }
1113         }
1114 
1115         spin_unlock_irq(&blkcg->lock);
1116 }
1117 
1118 static void blkcg_css_free(struct cgroup_subsys_state *css)
1119 {
1120         struct blkcg *blkcg = css_to_blkcg(css);
1121         int i;
1122 
1123         mutex_lock(&blkcg_pol_mutex);
1124 
1125         list_del(&blkcg->all_blkcgs_node);
1126 
1127         for (i = 0; i < BLKCG_MAX_POLS; i++)
1128                 if (blkcg->cpd[i])
1129                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1130 
1131         mutex_unlock(&blkcg_pol_mutex);
1132 
1133         kfree(blkcg);
1134 }
1135 
1136 static struct cgroup_subsys_state *
1137 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1138 {
1139         struct blkcg *blkcg;
1140         struct cgroup_subsys_state *ret;
1141         int i;
1142 
1143         mutex_lock(&blkcg_pol_mutex);
1144 
1145         if (!parent_css) {
1146                 blkcg = &blkcg_root;
1147         } else {
1148                 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1149                 if (!blkcg) {
1150                         ret = ERR_PTR(-ENOMEM);
1151                         goto unlock;
1152                 }
1153         }
1154 
1155         for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1156                 struct blkcg_policy *pol = blkcg_policy[i];
1157                 struct blkcg_policy_data *cpd;
1158 
1159                 /*
1160                  * If the policy hasn't been registered yet, skip it for now;
1161                  * its per-cgroup data is allocated when it registers. Otherwise,
1162                  * check if the policy requires any specific per-cgroup
1163                  * data: if it does, allocate and initialize it.
1164                  */
1165                 if (!pol || !pol->cpd_alloc_fn)
1166                         continue;
1167 
1168                 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1169                 if (!cpd) {
1170                         ret = ERR_PTR(-ENOMEM);
1171                         goto free_pd_blkcg;
1172                 }
1173                 blkcg->cpd[i] = cpd;
1174                 cpd->blkcg = blkcg;
1175                 cpd->plid = i;
1176                 if (pol->cpd_init_fn)
1177                         pol->cpd_init_fn(cpd);
1178         }
1179 
1180         spin_lock_init(&blkcg->lock);
1181         INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1182         INIT_HLIST_HEAD(&blkcg->blkg_list);
1183 #ifdef CONFIG_CGROUP_WRITEBACK
1184         INIT_LIST_HEAD(&blkcg->cgwb_list);
1185         refcount_set(&blkcg->cgwb_refcnt, 1);
1186 #endif
1187         list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1188 
1189         mutex_unlock(&blkcg_pol_mutex);
1190         return &blkcg->css;
1191 
1192 free_pd_blkcg:
1193         for (i--; i >= 0; i--)
1194                 if (blkcg->cpd[i])
1195                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1196 
1197         if (blkcg != &blkcg_root)
1198                 kfree(blkcg);
1199 unlock:
1200         mutex_unlock(&blkcg_pol_mutex);
1201         return ret;
1202 }
1203 
1204 /**
1205  * blkcg_init_queue - initialize blkcg part of request queue
1206  * @q: request_queue to initialize
1207  *
1208  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1209  * part of new request_queue @q.
1210  *
1211  * RETURNS:
1212  * 0 on success, -errno on failure.
1213  */
1214 int blkcg_init_queue(struct request_queue *q)
1215 {
1216         struct blkcg_gq *new_blkg, *blkg;
1217         bool preloaded;
1218         int ret;
1219 
1220         new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1221         if (!new_blkg)
1222                 return -ENOMEM;
1223 
1224         preloaded = !radix_tree_preload(GFP_KERNEL);
1225 
1226         /* Make sure the root blkg exists. */
1227         rcu_read_lock();
1228         spin_lock_irq(q->queue_lock);
1229         blkg = blkg_create(&blkcg_root, q, new_blkg);
1230         if (IS_ERR(blkg))
1231                 goto err_unlock;
1232         q->root_blkg = blkg;
1233         q->root_rl.blkg = blkg;
1234         spin_unlock_irq(q->queue_lock);
1235         rcu_read_unlock();
1236 
1237         if (preloaded)
1238                 radix_tree_preload_end();
1239 
1240         ret = blk_iolatency_init(q);
1241         if (ret) {
1242                 spin_lock_irq(q->queue_lock);
1243                 blkg_destroy_all(q);
1244                 spin_unlock_irq(q->queue_lock);
1245                 return ret;
1246         }
1247 
1248         ret = blk_throtl_init(q);
1249         if (ret) {
1250                 spin_lock_irq(q->queue_lock);
1251                 blkg_destroy_all(q);
1252                 spin_unlock_irq(q->queue_lock);
1253         }
1254         return ret;
1255 
1256 err_unlock:
1257         spin_unlock_irq(q->queue_lock);
1258         rcu_read_unlock();
1259         if (preloaded)
1260                 radix_tree_preload_end();
1261         return PTR_ERR(blkg);
1262 }
1263 
1264 /**
1265  * blkcg_drain_queue - drain blkcg part of request_queue
1266  * @q: request_queue to drain
1267  *
1268  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1269  */
1270 void blkcg_drain_queue(struct request_queue *q)
1271 {
1272         lockdep_assert_held(q->queue_lock);
1273 
1274         /*
1275          * @q could be exiting and already have destroyed all blkgs as
1276          * indicated by NULL root_blkg.  If so, don't confuse policies.
1277          */
1278         if (!q->root_blkg)
1279                 return;
1280 
1281         blk_throtl_drain(q);
1282 }
1283 
1284 /**
1285  * blkcg_exit_queue - exit and release blkcg part of request_queue
1286  * @q: request_queue being released
1287  *
1288  * Called from blk_release_queue().  Responsible for exiting blkcg part.
1289  */
1290 void blkcg_exit_queue(struct request_queue *q)
1291 {
1292         spin_lock_irq(q->queue_lock);
1293         blkg_destroy_all(q);
1294         spin_unlock_irq(q->queue_lock);
1295 
1296         blk_throtl_exit(q);
1297 }
1298 
1299 /*
1300  * We cannot support shared io contexts, as we have no means to support
1301  * two tasks with the same ioc in two different groups without major rework
1302  * of the main cic data structures.  For now we allow a task to change
1303  * its cgroup only if it's the only owner of its ioc.
1304  */
1305 static int blkcg_can_attach(struct cgroup_taskset *tset)
1306 {
1307         struct task_struct *task;
1308         struct cgroup_subsys_state *dst_css;
1309         struct io_context *ioc;
1310         int ret = 0;
1311 
1312         /* task_lock() is needed to avoid races with exit_io_context() */
1313         cgroup_taskset_for_each(task, dst_css, tset) {
1314                 task_lock(task);
1315                 ioc = task->io_context;
1316                 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1317                         ret = -EINVAL;
1318                 task_unlock(task);
1319                 if (ret)
1320                         break;
1321         }
1322         return ret;
1323 }
1324 
1325 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1326 {
1327         int i;
1328 
1329         mutex_lock(&blkcg_pol_mutex);
1330 
1331         for (i = 0; i < BLKCG_MAX_POLS; i++) {
1332                 struct blkcg_policy *pol = blkcg_policy[i];
1333                 struct blkcg *blkcg;
1334 
1335                 if (!pol || !pol->cpd_bind_fn)
1336                         continue;
1337 
1338                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1339                         if (blkcg->cpd[pol->plid])
1340                                 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1341         }
1342         mutex_unlock(&blkcg_pol_mutex);
1343 }
1344 
1345 static void blkcg_exit(struct task_struct *tsk)
1346 {
1347         if (tsk->throttle_queue)
1348                 blk_put_queue(tsk->throttle_queue);
1349         tsk->throttle_queue = NULL;
1350 }
1351 
1352 struct cgroup_subsys io_cgrp_subsys = {
1353         .css_alloc = blkcg_css_alloc,
1354         .css_offline = blkcg_css_offline,
1355         .css_free = blkcg_css_free,
1356         .can_attach = blkcg_can_attach,
1357         .bind = blkcg_bind,
1358         .dfl_cftypes = blkcg_files,
1359         .legacy_cftypes = blkcg_legacy_files,
1360         .legacy_name = "blkio",
1361         .exit = blkcg_exit,
1362 #ifdef CONFIG_MEMCG
1363         /*
1364          * This ensures that, if available, memcg is automatically enabled
1365          * together on the default hierarchy so that the owner cgroup can
1366          * be retrieved from writeback pages.
1367          */
1368         .depends_on = 1 << memory_cgrp_id,
1369 #endif
1370 };
1371 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1372 
1373 /**
1374  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1375  * @q: request_queue of interest
1376  * @pol: blkcg policy to activate
1377  *
1378  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1379  * bypass mode to populate its blkgs with policy_data for @pol.
1380  *
1381  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1382  * from IO path.  Update of each blkg is protected by both queue and blkcg
1383  * locks so that holding either lock and testing blkcg_policy_enabled() is
1384  * always enough for dereferencing policy data.
1385  *
1386  * The caller is responsible for synchronizing [de]activations and policy
1387  * [un]registerations.  Returns 0 on success, -errno on failure.
1388  */
1389 int blkcg_activate_policy(struct request_queue *q,
1390                           const struct blkcg_policy *pol)
1391 {
1392         struct blkg_policy_data *pd_prealloc = NULL;
1393         struct blkcg_gq *blkg;
1394         int ret;
1395 
1396         if (blkcg_policy_enabled(q, pol))
1397                 return 0;
1398 
1399         if (q->mq_ops)
1400                 blk_mq_freeze_queue(q);
1401         else
1402                 blk_queue_bypass_start(q);
1403 pd_prealloc:
1404         if (!pd_prealloc) {
1405                 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1406                 if (!pd_prealloc) {
1407                         ret = -ENOMEM;
1408                         goto out_bypass_end;
1409                 }
1410         }
1411 
1412         spin_lock_irq(q->queue_lock);
1413 
1414         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1415                 struct blkg_policy_data *pd;
1416 
1417                 if (blkg->pd[pol->plid])
1418                         continue;
1419 
1420                 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1421                 if (!pd)
1422                         swap(pd, pd_prealloc);
1423                 if (!pd) {
1424                         spin_unlock_irq(q->queue_lock);
1425                         goto pd_prealloc;
1426                 }
1427 
1428                 blkg->pd[pol->plid] = pd;
1429                 pd->blkg = blkg;
1430                 pd->plid = pol->plid;
1431                 if (pol->pd_init_fn)
1432                         pol->pd_init_fn(pd);
1433         }
1434 
1435         __set_bit(pol->plid, q->blkcg_pols);
1436         ret = 0;
1437 
1438         spin_unlock_irq(q->queue_lock);
1439 out_bypass_end:
1440         if (q->mq_ops)
1441                 blk_mq_unfreeze_queue(q);
1442         else
1443                 blk_queue_bypass_end(q);
1444         if (pd_prealloc)
1445                 pol->pd_free_fn(pd_prealloc);
1446         return ret;
1447 }
1448 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
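
/*
 * Illustrative usage sketch, not part of the original file: a policy
 * enabling itself on a queue and then reaching its per-blkg data.
 * "demo_init_queue" and "blkcg_policy_demo" are hypothetical; blkg_to_pd()
 * is assumed to be the blk-cgroup.h helper returning blkg->pd[pol->plid].
 */
static int demo_init_queue(struct request_queue *q)
{
        int ret;

        ret = blkcg_activate_policy(q, &blkcg_policy_demo);
        if (ret)
                return ret;

        /* every blkg on @q, including the root blkg, now carries a pd */
        WARN_ON_ONCE(!blkg_to_pd(q->root_blkg, &blkcg_policy_demo));
        return 0;
}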
1449 
1450 /**
1451  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1452  * @q: request_queue of interest
1453  * @pol: blkcg policy to deactivate
1454  *
1455  * Deactivate @pol on @q.  Follows the same synchronization rules as
1456  * blkcg_activate_policy().
1457  */
1458 void blkcg_deactivate_policy(struct request_queue *q,
1459                              const struct blkcg_policy *pol)
1460 {
1461         struct blkcg_gq *blkg;
1462 
1463         if (!blkcg_policy_enabled(q, pol))
1464                 return;
1465 
1466         if (q->mq_ops)
1467                 blk_mq_freeze_queue(q);
1468         else
1469                 blk_queue_bypass_start(q);
1470 
1471         spin_lock_irq(q->queue_lock);
1472 
1473         __clear_bit(pol->plid, q->blkcg_pols);
1474 
1475         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1476                 if (blkg->pd[pol->plid]) {
1477                         if (pol->pd_offline_fn)
1478                                 pol->pd_offline_fn(blkg->pd[pol->plid]);
1479                         pol->pd_free_fn(blkg->pd[pol->plid]);
1480                         blkg->pd[pol->plid] = NULL;
1481                 }
1482         }
1483 
1484         spin_unlock_irq(q->queue_lock);
1485 
1486         if (q->mq_ops)
1487                 blk_mq_unfreeze_queue(q);
1488         else
1489                 blk_queue_bypass_end(q);
1490 }
1491 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1492 
1493 /**
1494  * blkcg_policy_register - register a blkcg policy
1495  * @pol: blkcg policy to register
1496  *
1497  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1498  * successful registration.  Returns 0 on success and -errno on failure.
1499  */
1500 int blkcg_policy_register(struct blkcg_policy *pol)
1501 {
1502         struct blkcg *blkcg;
1503         int i, ret;
1504 
1505         mutex_lock(&blkcg_pol_register_mutex);
1506         mutex_lock(&blkcg_pol_mutex);
1507 
1508         /* find an empty slot */
1509         ret = -ENOSPC;
1510         for (i = 0; i < BLKCG_MAX_POLS; i++)
1511                 if (!blkcg_policy[i])
1512                         break;
1513         if (i >= BLKCG_MAX_POLS) {
1514                 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1515                 goto err_unlock;
1516         }
1517 
1518         /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1519         if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1520                 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1521                 goto err_unlock;
1522 
1523         /* register @pol */
1524         pol->plid = i;
1525         blkcg_policy[pol->plid] = pol;
1526 
1527         /* allocate and install cpd's */
1528         if (pol->cpd_alloc_fn) {
1529                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1530                         struct blkcg_policy_data *cpd;
1531 
1532                         cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1533                         if (!cpd)
1534                                 goto err_free_cpds;
1535 
1536                         blkcg->cpd[pol->plid] = cpd;
1537                         cpd->blkcg = blkcg;
1538                         cpd->plid = pol->plid;
1539                         pol->cpd_init_fn(cpd);
1540                 }
1541         }
1542 
1543         mutex_unlock(&blkcg_pol_mutex);
1544 
1545         /* everything is in place, add intf files for the new policy */
1546         if (pol->dfl_cftypes)
1547                 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1548                                                pol->dfl_cftypes));
1549         if (pol->legacy_cftypes)
1550                 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1551                                                   pol->legacy_cftypes));
1552         mutex_unlock(&blkcg_pol_register_mutex);
1553         return 0;
1554 
1555 err_free_cpds:
1556         if (pol->cpd_free_fn) {
1557                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1558                         if (blkcg->cpd[pol->plid]) {
1559                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1560                                 blkcg->cpd[pol->plid] = NULL;
1561                         }
1562                 }
1563         }
1564         blkcg_policy[pol->plid] = NULL;
1565 err_unlock:
1566         mutex_unlock(&blkcg_pol_mutex);
1567         mutex_unlock(&blkcg_pol_register_mutex);
1568         return ret;
1569 }
1570 EXPORT_SYMBOL_GPL(blkcg_policy_register);
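
/*
 * The pairing checks above require pd_alloc_fn/pd_free_fn (and
 * cpd_alloc_fn/cpd_free_fn) to be provided together.  Also note that when
 * cpd_alloc_fn is set, cpd_init_fn is invoked unconditionally for every
 * existing blkcg, so such a policy should provide cpd_init_fn as well.  A
 * minimal policy definition, with hypothetical callbacks, might look like:
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.dfl_cftypes	= example_dfl_files,
 *		.legacy_cftypes	= example_legacy_files,
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_offline_fn	= example_pd_offline,
 *		.pd_free_fn	= example_pd_free,
 *	};
 */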
1571 
1572 /**
1573  * blkcg_policy_unregister - unregister a blkcg policy
1574  * @pol: blkcg policy to unregister
1575  *
1576  * Undo blkcg_policy_register(@pol).  Might sleep.
1577  */
1578 void blkcg_policy_unregister(struct blkcg_policy *pol)
1579 {
1580         struct blkcg *blkcg;
1581 
1582         mutex_lock(&blkcg_pol_register_mutex);
1583 
1584         if (WARN_ON(blkcg_policy[pol->plid] != pol))
1585                 goto out_unlock;
1586 
1587         /* kill the intf files first */
1588         if (pol->dfl_cftypes)
1589                 cgroup_rm_cftypes(pol->dfl_cftypes);
1590         if (pol->legacy_cftypes)
1591                 cgroup_rm_cftypes(pol->legacy_cftypes);
1592 
1593         /* remove cpds and unregister */
1594         mutex_lock(&blkcg_pol_mutex);
1595 
1596         if (pol->cpd_free_fn) {
1597                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1598                         if (blkcg->cpd[pol->plid]) {
1599                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1600                                 blkcg->cpd[pol->plid] = NULL;
1601                         }
1602                 }
1603         }
1604         blkcg_policy[pol->plid] = NULL;
1605 
1606         mutex_unlock(&blkcg_pol_mutex);
1607 out_unlock:
1608         mutex_unlock(&blkcg_pol_register_mutex);
1609 }
1610 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
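
/*
 * For a modular policy, registration and unregistration are typically tied
 * to module init/exit.  Continuing the hypothetical example above:
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_example);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */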
1611 
1612 /*
1613  * Scale the accumulated delay based on how long it has been since we updated
1614  * the delay.  We only call this when we are adding delay, in case it's been a
1615  * while since we added delay, and when we are checking to see if we need to
1616  * delay a task, to account for any delays that may have occurred.
1617  */
1618 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1619 {
1620         u64 old = atomic64_read(&blkg->delay_start);
1621 
1622         /*
1623          * We only want to scale down every second.  The idea here is that we
1624          * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1625          * time window.  We only want to throttle tasks for recent delay that
1626          * has occurred, in 1 second time windows, since that's the maximum
1627          * amount of time things can be throttled for.  We save the delay window in
1628          * blkg->last_delay so we know what amount is still left to be charged
1629          * to the blkg from this point onward.  blkg->last_use keeps track of
1630          * the use_delay counter.  The idea is if we're unthrottling the blkg we
1631          * are ok with whatever is happening now, and we can take away more of
1632          * the accumulated delay as we've already throttled enough that
1633          * everybody is happy with their IO latencies.
1634          */
1635         if (time_before64(old + NSEC_PER_SEC, now) &&
1636             atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1637                 u64 cur = atomic64_read(&blkg->delay_nsec);
1638                 u64 sub = min_t(u64, blkg->last_delay, now - old);
1639                 int cur_use = atomic_read(&blkg->use_delay);
1640 
1641                 /*
1642                  * We've been unthrottled, subtract a larger chunk of our
1643                  * accumulated delay.
1644                  */
1645                 if (cur_use < blkg->last_use)
1646                         sub = max_t(u64, sub, blkg->last_delay >> 1);
1647 
1648                 /*
1649                  * This shouldn't happen, but handle it anyway.  Our delay_nsec
1650                  * should only ever be growing except here where we subtract out
1651                  * min(last_delay, 1 second), but lord knows bugs happen and I'd
1652                  * rather not end up with negative numbers.
1653                  */
1654                 if (unlikely(cur < sub)) {
1655                         atomic64_set(&blkg->delay_nsec, 0);
1656                         blkg->last_delay = 0;
1657                 } else {
1658                         atomic64_sub(sub, &blkg->delay_nsec);
1659                         blkg->last_delay = cur - sub;
1660                 }
1661                 blkg->last_use = cur_use;
1662         }
1663 }
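
/*
 * A worked example of the scaling above, with illustrative numbers: suppose
 * delay_nsec is 5s, last_delay is 3s and 1.2s have elapsed since delay_start.
 * With use_delay unchanged, sub = min(3s, 1.2s) = 1.2s, so delay_nsec drops
 * to 3.8s and last_delay becomes 3.8s.  If use_delay had decreased (the blkg
 * was unthrottled), sub = max(1.2s, 3s / 2) = 1.5s, leaving delay_nsec at
 * 3.5s.  Had less than a second elapsed, nothing would have been scaled.
 */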
1664 
1665 /*
1666  * This is called when we want to actually walk up the hierarchy and check to
1667  * see if we need to throttle, and then actually throttle if there is some
1668  * accumulated delay.  This should only be called upon return to user space so
1669  * we're not holding some lock that would induce a priority inversion.
1670  */
1671 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1672 {
1673         u64 now = ktime_to_ns(ktime_get());
1674         u64 exp;
1675         u64 delay_nsec = 0;
1676         int tok;
1677 
1678         while (blkg->parent) {
1679                 if (atomic_read(&blkg->use_delay)) {
1680                         blkcg_scale_delay(blkg, now);
1681                         delay_nsec = max_t(u64, delay_nsec,
1682                                            atomic64_read(&blkg->delay_nsec));
1683                 }
1684                 blkg = blkg->parent;
1685         }
1686 
1687         if (!delay_nsec)
1688                 return;
1689 
1690         /*
1691          * Let's not sleep for all eternity if we've amassed a huge delay.
1692          * Swapping or metadata IO can accumulate 10's of seconds worth of
1693          * delay, and we want userspace to be able to do _something_, so cap
1694          * the delays at 250ms.  If there's 10's of seconds worth of delay then
1695          * the tasks will be delayed for 250ms for every syscall.
1696          */
1697         delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1698 
1699         /*
1700          * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
1701          * that hasn't landed upstream yet.  Once that stuff is in place we need
1702          * to do a psi_memstall_enter/leave if memdelay is set.
1703          */
1704 
1705         exp = ktime_add_ns(now, delay_nsec);
1706         tok = io_schedule_prepare();
1707         do {
1708                 __set_current_state(TASK_KILLABLE);
1709                 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1710                         break;
1711         } while (!fatal_signal_pending(current));
1712         io_schedule_finish(tok);
1713 }
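
/*
 * For example, if the hierarchy walk finds 4s of accumulated delay_nsec, the
 * min_t() above limits this particular sleep to min(4s, 250ms) = 250ms; the
 * rest of the debt is only worked off as blkcg_scale_delay() subtracts it in
 * later windows.  The sleep is done in TASK_KILLABLE, so a fatal signal
 * still terminates the task promptly.
 */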
1714 
1715 /**
1716  * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1717  *
1718  * This is only called if we've been marked with set_notify_resume().  Obviously
1719  * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1720  * check to see if current->throttle_queue is set and if not this doesn't do
1721  * anything.  This should only ever be called by the resume code; it's not meant
1722  * to be called by people willy-nilly, as it will actually do the work to
1723  * throttle the task if it is set up for throttling.
1724  */
1725 void blkcg_maybe_throttle_current(void)
1726 {
1727         struct request_queue *q = current->throttle_queue;
1728         struct cgroup_subsys_state *css;
1729         struct blkcg *blkcg;
1730         struct blkcg_gq *blkg;
1731         bool use_memdelay = current->use_memdelay;
1732 
1733         if (!q)
1734                 return;
1735 
1736         current->throttle_queue = NULL;
1737         current->use_memdelay = false;
1738 
1739         rcu_read_lock();
1740         css = kthread_blkcg();
1741         if (css)
1742                 blkcg = css_to_blkcg(css);
1743         else
1744                 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1745 
1746         if (!blkcg)
1747                 goto out;
1748         blkg = blkg_lookup(blkcg, q);
1749         if (!blkg)
1750                 goto out;
1751         blkg = blkg_try_get(blkg);
1752         if (!blkg)
1753                 goto out;
1754         rcu_read_unlock();
1755 
1756         blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1757         blkg_put(blkg);
1758         blk_put_queue(q);
1759         return;
1760 out:
1761         rcu_read_unlock();
1762         blk_put_queue(q);
1763 }
1764 EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
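
/*
 * The expected caller is the return-to-user-space path: in this era the arch
 * exit loop does, roughly,
 *
 *	if (cached_flags & _TIF_NOTIFY_RESUME) {
 *		clear_thread_flag(TIF_NOTIFY_RESUME);
 *		tracehook_notify_resume(regs);
 *	}
 *
 * and tracehook_notify_resume() (from the tracehook.h included above) ends
 * by calling blkcg_maybe_throttle_current().  No locks are held at that
 * point, which is why the throttling sleep cannot cause a priority
 * inversion.
 */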
1765 
1766 /**
1767  * blkcg_schedule_throttle - this task needs to check for throttling
1768  * @q: the request queue IO was submitted on
1769  * @use_memdelay: do we charge this to memory delay for PSI
1770  *
1771  * This is called by the IO controller when we know there's delay accumulated
1772  * for the blkg for this task.  We do not pass the blkg because there are places
1773  * we call this that may not have that information; the swapping code, for
1774  * instance, will only have a request_queue at that point.  This sets
1775  * notify_resume for the task to check and see if it requires throttling before
1776  * returning to user space.
1777  *
1778  * We will only schedule once per syscall.  You can call this over and over
1779  * again and it will only do the check once upon return to user space, and only
1780  * throttle once.  If the task needs to be throttled again it'll need to be
1781  * re-set the next time we see the task.
1782  */
1783 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1784 {
1785         if (unlikely(current->flags & PF_KTHREAD))
1786                 return;
1787 
1788         if (!blk_get_queue(q))
1789                 return;
1790 
1791         if (current->throttle_queue)
1792                 blk_put_queue(current->throttle_queue);
1793         current->throttle_queue = q;
1794         if (use_memdelay)
1795                 current->use_memdelay = use_memdelay;
1796         set_notify_resume(current);
1797 }
1798 EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
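
/*
 * Because only a request_queue is needed, this can be called from contexts
 * that have no blkg at hand.  A hypothetical call site charging a
 * swap-induced stall, also accounted as memory delay, might be:
 *
 *	if (swap_was_congested)
 *		blkcg_schedule_throttle(q, true);
 *
 * Calling it repeatedly before the task returns to user space is cheap: the
 * stashed queue reference is simply replaced and only one notify_resume
 * check happens on the way out.
 */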
1799 
1800 /**
1801  * blkcg_add_delay - add delay to this blkg
1802  * @now: the current time in nanoseconds
1803  * @delta: how many nanoseconds of delay to add
1804  *
1805  * Charge @delta to the blkg's current delay accumulation.  This is used to
1806  * throttle tasks if an IO controller thinks we need more throttling.
1807  */
1808 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1809 {
1810         blkcg_scale_delay(blkg, now);
1811         atomic64_add(delta, &blkg->delay_nsec);
1812 }
1813 EXPORT_SYMBOL_GPL(blkcg_add_delay);
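
/*
 * blkcg_add_delay() only records the debt; nothing sleeps until the task
 * reaches blkcg_maybe_throttle_current() on its way back to user space.  A
 * controller would therefore usually pair it with blkcg_schedule_throttle(),
 * e.g. with a hypothetical helper:
 *
 *	static void example_penalize(struct blkcg_gq *blkg, u64 now, u64 cost)
 *	{
 *		blkcg_add_delay(blkg, now, cost);
 *		blkcg_schedule_throttle(blkg->q, false);
 *	}
 */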
1814 
1815 module_param(blkcg_debug_stats, bool, 0644);
1816 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1817 
