TOMOYO Linux Cross Reference
Linux/block/blk-cgroup.c

  1 /*
  2  * Common Block IO controller cgroup interface
  3  *
  4  * Based on ideas and code from CFQ, CFS and BFQ:
  5  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6  *
  7  * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8  *                    Paolo Valente <paolo.valente@unimore.it>
  9  *
 10  * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 11  *                    Nauman Rafique <nauman@google.com>
 12  *
 13  * For policy-specific per-blkcg data:
 14  * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 15  *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 16  */
 17 #include <linux/ioprio.h>
 18 #include <linux/kdev_t.h>
 19 #include <linux/module.h>
 20 #include <linux/err.h>
 21 #include <linux/blkdev.h>
 22 #include <linux/backing-dev.h>
 23 #include <linux/slab.h>
 24 #include <linux/genhd.h>
 25 #include <linux/delay.h>
 26 #include <linux/atomic.h>
 27 #include <linux/ctype.h>
 28 #include <linux/blk-cgroup.h>
 29 #include "blk.h"
 30 
 31 #define MAX_KEY_LEN 100
 32 
 33 /*
 34  * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 35  * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 36  * policy [un]register operations including cgroup file additions /
 37  * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 38  * allows grabbing it from cgroup callbacks.
 39  */
 40 static DEFINE_MUTEX(blkcg_pol_register_mutex);
 41 static DEFINE_MUTEX(blkcg_pol_mutex);
 42 
 43 struct blkcg blkcg_root;
 44 EXPORT_SYMBOL_GPL(blkcg_root);
 45 
 46 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 47 
 48 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 49 
 50 static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
 51 
 52 static bool blkcg_policy_enabled(struct request_queue *q,
 53                                  const struct blkcg_policy *pol)
 54 {
 55         return pol && test_bit(pol->plid, q->blkcg_pols);
 56 }
 57 
 58 /**
 59  * blkg_free - free a blkg
 60  * @blkg: blkg to free
 61  *
 62  * Free @blkg which may be partially allocated.
 63  */
 64 static void blkg_free(struct blkcg_gq *blkg)
 65 {
 66         int i;
 67 
 68         if (!blkg)
 69                 return;
 70 
 71         for (i = 0; i < BLKCG_MAX_POLS; i++)
 72                 if (blkg->pd[i])
 73                         blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 74 
 75         if (blkg->blkcg != &blkcg_root)
 76                 blk_exit_rl(&blkg->rl);
 77 
 78         blkg_rwstat_exit(&blkg->stat_ios);
 79         blkg_rwstat_exit(&blkg->stat_bytes);
 80         kfree(blkg);
 81 }
 82 
 83 /**
 84  * blkg_alloc - allocate a blkg
 85  * @blkcg: block cgroup the new blkg is associated with
 86  * @q: request_queue the new blkg is associated with
 87  * @gfp_mask: allocation mask to use
 88  *
 89  * Allocate a new blkg assocating @blkcg and @q.
 90  */
 91 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 92                                    gfp_t gfp_mask)
 93 {
 94         struct blkcg_gq *blkg;
 95         int i;
 96 
 97         /* alloc and init base part */
 98         blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 99         if (!blkg)
100                 return NULL;
101 
102         if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
103             blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
104                 goto err_free;
105 
106         blkg->q = q;
107         INIT_LIST_HEAD(&blkg->q_node);
108         blkg->blkcg = blkcg;
109         atomic_set(&blkg->refcnt, 1);
110 
111         /* root blkg uses @q->root_rl, init rl only for !root blkgs */
112         if (blkcg != &blkcg_root) {
113                 if (blk_init_rl(&blkg->rl, q, gfp_mask))
114                         goto err_free;
115                 blkg->rl.blkg = blkg;
116         }
117 
118         for (i = 0; i < BLKCG_MAX_POLS; i++) {
119                 struct blkcg_policy *pol = blkcg_policy[i];
120                 struct blkg_policy_data *pd;
121 
122                 if (!blkcg_policy_enabled(q, pol))
123                         continue;
124 
125                 /* alloc per-policy data and attach it to blkg */
126                 pd = pol->pd_alloc_fn(gfp_mask, q->node);
127                 if (!pd)
128                         goto err_free;
129 
130                 blkg->pd[i] = pd;
131                 pd->blkg = blkg;
132                 pd->plid = i;
133         }
134 
135         return blkg;
136 
137 err_free:
138         blkg_free(blkg);
139         return NULL;
140 }
141 
142 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
143                                       struct request_queue *q, bool update_hint)
144 {
145         struct blkcg_gq *blkg;
146 
147         /*
148          * Hint didn't match.  Look up from the radix tree.  Note that the
149          * hint can only be updated under queue_lock as otherwise @blkg
150          * could have already been removed from blkg_tree.  The caller is
151          * responsible for grabbing queue_lock if @update_hint.
152          */
153         blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
154         if (blkg && blkg->q == q) {
155                 if (update_hint) {
156                         lockdep_assert_held(q->queue_lock);
157                         rcu_assign_pointer(blkcg->blkg_hint, blkg);
158                 }
159                 return blkg;
160         }
161 
162         return NULL;
163 }
164 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
165 
166 /*
167  * If @new_blkg is %NULL, this function tries to allocate a new one as
168  * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
169  */
170 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
171                                     struct request_queue *q,
172                                     struct blkcg_gq *new_blkg)
173 {
174         struct blkcg_gq *blkg;
175         struct bdi_writeback_congested *wb_congested;
176         int i, ret;
177 
178         WARN_ON_ONCE(!rcu_read_lock_held());
179         lockdep_assert_held(q->queue_lock);
180 
181         /* blkg holds a reference to blkcg */
182         if (!css_tryget_online(&blkcg->css)) {
183                 ret = -ENODEV;
184                 goto err_free_blkg;
185         }
186 
187         wb_congested = wb_congested_get_create(&q->backing_dev_info,
188                                                blkcg->css.id, GFP_NOWAIT);
189         if (!wb_congested) {
190                 ret = -ENOMEM;
191                 goto err_put_css;
192         }
193 
194         /* allocate */
195         if (!new_blkg) {
196                 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
197                 if (unlikely(!new_blkg)) {
198                         ret = -ENOMEM;
199                         goto err_put_congested;
200                 }
201         }
202         blkg = new_blkg;
203         blkg->wb_congested = wb_congested;
204 
205         /* link parent */
206         if (blkcg_parent(blkcg)) {
207                 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
208                 if (WARN_ON_ONCE(!blkg->parent)) {
209                         ret = -ENODEV;
210                         goto err_put_congested;
211                 }
212                 blkg_get(blkg->parent);
213         }
214 
215         /* invoke per-policy init */
216         for (i = 0; i < BLKCG_MAX_POLS; i++) {
217                 struct blkcg_policy *pol = blkcg_policy[i];
218 
219                 if (blkg->pd[i] && pol->pd_init_fn)
220                         pol->pd_init_fn(blkg->pd[i]);
221         }
222 
223         /* insert */
224         spin_lock(&blkcg->lock);
225         ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
226         if (likely(!ret)) {
227                 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
228                 list_add(&blkg->q_node, &q->blkg_list);
229 
230                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
231                         struct blkcg_policy *pol = blkcg_policy[i];
232 
233                         if (blkg->pd[i] && pol->pd_online_fn)
234                                 pol->pd_online_fn(blkg->pd[i]);
235                 }
236         }
237         blkg->online = true;
238         spin_unlock(&blkcg->lock);
239 
240         if (!ret)
241                 return blkg;
242 
243         /* @blkg failed to be fully initialized, use the usual release path */
244         blkg_put(blkg);
245         return ERR_PTR(ret);
246 
247 err_put_congested:
248         wb_congested_put(wb_congested);
249 err_put_css:
250         css_put(&blkcg->css);
251 err_free_blkg:
252         blkg_free(new_blkg);
253         return ERR_PTR(ret);
254 }
255 
256 /**
257  * blkg_lookup_create - lookup blkg, try to create one if not there
258  * @blkcg: blkcg of interest
259  * @q: request_queue of interest
260  *
261  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
262  * create one.  blkg creation is performed recursively from blkcg_root such
263  * that all non-root blkg's have access to the parent blkg.  This function
264  * should be called under RCU read lock and @q->queue_lock.
265  *
266  * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 267  * value on error.  If @q is dying, returns ERR_PTR(-ENODEV).  If @q is not
 268  * dying and bypassing, returns ERR_PTR(-EBUSY).
269  */
270 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
271                                     struct request_queue *q)
272 {
273         struct blkcg_gq *blkg;
274 
275         WARN_ON_ONCE(!rcu_read_lock_held());
276         lockdep_assert_held(q->queue_lock);
277 
278         /*
279          * This could be the first entry point of blkcg implementation and
280          * we shouldn't allow anything to go through for a bypassing queue.
281          */
282         if (unlikely(blk_queue_bypass(q)))
283                 return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
284 
285         blkg = __blkg_lookup(blkcg, q, true);
286         if (blkg)
287                 return blkg;
288 
289         /*
290          * Create blkgs walking down from blkcg_root to @blkcg, so that all
291          * non-root blkgs have access to their parents.
292          */
293         while (true) {
294                 struct blkcg *pos = blkcg;
295                 struct blkcg *parent = blkcg_parent(blkcg);
296 
297                 while (parent && !__blkg_lookup(parent, q, false)) {
298                         pos = parent;
299                         parent = blkcg_parent(parent);
300                 }
301 
302                 blkg = blkg_create(pos, q, NULL);
303                 if (pos == blkcg || IS_ERR(blkg))
304                         return blkg;
305         }
306 }
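
/*
 * Illustrative sketch (not in the original file): a caller that needs the
 * blkg for a (blkcg, q) pair could wrap blkg_lookup_create() like this,
 * taking the RCU read lock and @q->queue_lock itself as required above.
 * example_get_blkg() is a hypothetical helper, not a kernel API.
 */
static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_lookup_create(blkcg, q);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	/* ERR_PTR(-ENODEV) if @q is dying, ERR_PTR(-EBUSY) if it is bypassing */
	return blkg;
}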
307 
308 static void blkg_destroy(struct blkcg_gq *blkg)
309 {
310         struct blkcg *blkcg = blkg->blkcg;
311         struct blkcg_gq *parent = blkg->parent;
312         int i;
313 
314         lockdep_assert_held(blkg->q->queue_lock);
315         lockdep_assert_held(&blkcg->lock);
316 
 317         /* Something is wrong if we are trying to remove the same group twice */
318         WARN_ON_ONCE(list_empty(&blkg->q_node));
319         WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
320 
321         for (i = 0; i < BLKCG_MAX_POLS; i++) {
322                 struct blkcg_policy *pol = blkcg_policy[i];
323 
324                 if (blkg->pd[i] && pol->pd_offline_fn)
325                         pol->pd_offline_fn(blkg->pd[i]);
326         }
327 
328         if (parent) {
329                 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
330                 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
331         }
332 
333         blkg->online = false;
334 
335         radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
336         list_del_init(&blkg->q_node);
337         hlist_del_init_rcu(&blkg->blkcg_node);
338 
339         /*
340          * Both setting lookup hint to and clearing it from @blkg are done
341          * under queue_lock.  If it's not pointing to @blkg now, it never
342          * will.  Hint assignment itself can race safely.
343          */
344         if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
345                 rcu_assign_pointer(blkcg->blkg_hint, NULL);
346 
347         /*
348          * Put the reference taken at the time of creation so that when all
349          * queues are gone, group can be destroyed.
350          */
351         blkg_put(blkg);
352 }
353 
354 /**
355  * blkg_destroy_all - destroy all blkgs associated with a request_queue
356  * @q: request_queue of interest
357  *
358  * Destroy all blkgs associated with @q.
359  */
360 static void blkg_destroy_all(struct request_queue *q)
361 {
362         struct blkcg_gq *blkg, *n;
363 
364         lockdep_assert_held(q->queue_lock);
365 
366         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
367                 struct blkcg *blkcg = blkg->blkcg;
368 
369                 spin_lock(&blkcg->lock);
370                 blkg_destroy(blkg);
371                 spin_unlock(&blkcg->lock);
372         }
373 
374         q->root_blkg = NULL;
375         q->root_rl.blkg = NULL;
376 }
377 
378 /*
379  * A group is RCU protected, but having an rcu lock does not mean that one
380  * can access all the fields of blkg and assume these are valid.  For
381  * example, don't try to follow throtl_data and request queue links.
382  *
 383  * Holding a reference to a blkg under RCU allows access only to values
 384  * local to the group, like group stats and group rate limits.
385  */
386 void __blkg_release_rcu(struct rcu_head *rcu_head)
387 {
388         struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
389 
390         /* release the blkcg and parent blkg refs this blkg has been holding */
391         css_put(&blkg->blkcg->css);
392         if (blkg->parent)
393                 blkg_put(blkg->parent);
394 
395         wb_congested_put(blkg->wb_congested);
396 
397         blkg_free(blkg);
398 }
399 EXPORT_SYMBOL_GPL(__blkg_release_rcu);
400 
401 /*
 402  * The 'next' function used by blk_queue_for_each_rl().  It's a bit tricky
403  * because the root blkg uses @q->root_rl instead of its own rl.
404  */
405 struct request_list *__blk_queue_next_rl(struct request_list *rl,
406                                          struct request_queue *q)
407 {
408         struct list_head *ent;
409         struct blkcg_gq *blkg;
410 
411         /*
412          * Determine the current blkg list_head.  The first entry is
413          * root_rl which is off @q->blkg_list and mapped to the head.
414          */
415         if (rl == &q->root_rl) {
416                 ent = &q->blkg_list;
417                 /* There are no more block groups, hence no request lists */
418                 if (list_empty(ent))
419                         return NULL;
420         } else {
421                 blkg = container_of(rl, struct blkcg_gq, rl);
422                 ent = &blkg->q_node;
423         }
424 
425         /* walk to the next list_head, skip root blkcg */
426         ent = ent->next;
427         if (ent == &q->root_blkg->q_node)
428                 ent = ent->next;
429         if (ent == &q->blkg_list)
430                 return NULL;
431 
432         blkg = container_of(ent, struct blkcg_gq, q_node);
433         return &blkg->rl;
434 }
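
/*
 * Illustrative sketch (not in the original file): blk.h builds the
 * blk_queue_for_each_rl() macro on top of __blk_queue_next_rl(), starting
 * from @q->root_rl.  A hypothetical walker over every request_list of a
 * queue, called with @q->queue_lock held, could look like this.
 */
static void example_walk_request_lists(struct request_queue *q)
{
	struct request_list *rl;

	lockdep_assert_held(q->queue_lock);

	blk_queue_for_each_rl(rl, q) {
		/* @rl is @q->root_rl first, then each non-root blkg's rl */
	}
}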
435 
436 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
437                              struct cftype *cftype, u64 val)
438 {
439         struct blkcg *blkcg = css_to_blkcg(css);
440         struct blkcg_gq *blkg;
441         int i;
442 
443         mutex_lock(&blkcg_pol_mutex);
444         spin_lock_irq(&blkcg->lock);
445 
446         /*
447          * Note that stat reset is racy - it doesn't synchronize against
448          * stat updates.  This is a debug feature which shouldn't exist
449          * anyway.  If you get hit by a race, retry.
450          */
451         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
452                 blkg_rwstat_reset(&blkg->stat_bytes);
453                 blkg_rwstat_reset(&blkg->stat_ios);
454 
455                 for (i = 0; i < BLKCG_MAX_POLS; i++) {
456                         struct blkcg_policy *pol = blkcg_policy[i];
457 
458                         if (blkg->pd[i] && pol->pd_reset_stats_fn)
459                                 pol->pd_reset_stats_fn(blkg->pd[i]);
460                 }
461         }
462 
463         spin_unlock_irq(&blkcg->lock);
464         mutex_unlock(&blkcg_pol_mutex);
465         return 0;
466 }
467 
468 const char *blkg_dev_name(struct blkcg_gq *blkg)
469 {
470         /* some drivers (floppy) instantiate a queue w/o disk registered */
471         if (blkg->q->backing_dev_info.dev)
472                 return dev_name(blkg->q->backing_dev_info.dev);
473         return NULL;
474 }
475 EXPORT_SYMBOL_GPL(blkg_dev_name);
476 
477 /**
478  * blkcg_print_blkgs - helper for printing per-blkg data
479  * @sf: seq_file to print to
480  * @blkcg: blkcg of interest
481  * @prfill: fill function to print out a blkg
482  * @pol: policy in question
483  * @data: data to be passed to @prfill
484  * @show_total: to print out sum of prfill return values or not
485  *
486  * This function invokes @prfill on each blkg of @blkcg if pd for the
487  * policy specified by @pol exists.  @prfill is invoked with @sf, the
488  * policy data and @data and the matching queue lock held.  If @show_total
489  * is %true, the sum of the return values from @prfill is printed with
490  * "Total" label at the end.
491  *
 492  * This is to be used to construct print functions for the
 493  * cftype->seq_show method.
494  */
495 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
496                        u64 (*prfill)(struct seq_file *,
497                                      struct blkg_policy_data *, int),
498                        const struct blkcg_policy *pol, int data,
499                        bool show_total)
500 {
501         struct blkcg_gq *blkg;
502         u64 total = 0;
503 
504         rcu_read_lock();
505         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
506                 spin_lock_irq(blkg->q->queue_lock);
507                 if (blkcg_policy_enabled(blkg->q, pol))
508                         total += prfill(sf, blkg->pd[pol->plid], data);
509                 spin_unlock_irq(blkg->q->queue_lock);
510         }
511         rcu_read_unlock();
512 
513         if (show_total)
514                 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
515 }
516 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
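
/*
 * Illustrative sketch (not in the original file): a policy usually pairs
 * blkcg_print_blkgs() with one of the prfill helpers below to implement a
 * cftype seq_show callback.  struct example_pd, its "serviced" rwstat and
 * example_policy (assumed registered elsewhere, see the sketch at the end
 * of this listing) are hypothetical names.
 */
static struct blkcg_policy example_policy;

struct example_pd {
	struct blkg_policy_data pd;	/* must be first for the offsetof below */
	struct blkg_rwstat serviced;	/* blkg_rwstat_init()'d in pd_alloc_fn */
};

static int example_print_serviced(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &example_policy,
			  offsetof(struct example_pd, serviced), true);
	return 0;
}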
517 
518 /**
519  * __blkg_prfill_u64 - prfill helper for a single u64 value
520  * @sf: seq_file to print to
521  * @pd: policy private data of interest
522  * @v: value to print
523  *
 524  * Print @v to @sf for the device associated with @pd.
525  */
526 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
527 {
528         const char *dname = blkg_dev_name(pd->blkg);
529 
530         if (!dname)
531                 return 0;
532 
533         seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
534         return v;
535 }
536 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
537 
538 /**
539  * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
540  * @sf: seq_file to print to
541  * @pd: policy private data of interest
542  * @rwstat: rwstat to print
543  *
 544  * Print @rwstat to @sf for the device associated with @pd.
545  */
546 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
547                          const struct blkg_rwstat *rwstat)
548 {
549         static const char *rwstr[] = {
550                 [BLKG_RWSTAT_READ]      = "Read",
551                 [BLKG_RWSTAT_WRITE]     = "Write",
552                 [BLKG_RWSTAT_SYNC]      = "Sync",
553                 [BLKG_RWSTAT_ASYNC]     = "Async",
554         };
555         const char *dname = blkg_dev_name(pd->blkg);
556         u64 v;
557         int i;
558 
559         if (!dname)
560                 return 0;
561 
562         for (i = 0; i < BLKG_RWSTAT_NR; i++)
563                 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
564                            (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
565 
566         v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
567                 atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
568         seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
569         return v;
570 }
571 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
572 
573 /**
574  * blkg_prfill_stat - prfill callback for blkg_stat
575  * @sf: seq_file to print to
576  * @pd: policy private data of interest
577  * @off: offset to the blkg_stat in @pd
578  *
579  * prfill callback for printing a blkg_stat.
580  */
581 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
582 {
583         return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
584 }
585 EXPORT_SYMBOL_GPL(blkg_prfill_stat);
586 
587 /**
588  * blkg_prfill_rwstat - prfill callback for blkg_rwstat
589  * @sf: seq_file to print to
590  * @pd: policy private data of interest
591  * @off: offset to the blkg_rwstat in @pd
592  *
593  * prfill callback for printing a blkg_rwstat.
594  */
595 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
596                        int off)
597 {
598         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
599 
600         return __blkg_prfill_rwstat(sf, pd, &rwstat);
601 }
602 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
603 
604 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
605                                     struct blkg_policy_data *pd, int off)
606 {
607         struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
608 
609         return __blkg_prfill_rwstat(sf, pd, &rwstat);
610 }
611 
612 /**
613  * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
614  * @sf: seq_file to print to
615  * @v: unused
616  *
617  * To be used as cftype->seq_show to print blkg->stat_bytes.
618  * cftype->private must be set to the blkcg_policy.
619  */
620 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
621 {
622         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
623                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
624                           offsetof(struct blkcg_gq, stat_bytes), true);
625         return 0;
626 }
627 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
628 
629 /**
 630  * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
631  * @sf: seq_file to print to
632  * @v: unused
633  *
634  * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
635  * must be set to the blkcg_policy.
636  */
637 int blkg_print_stat_ios(struct seq_file *sf, void *v)
638 {
639         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
640                           blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
641                           offsetof(struct blkcg_gq, stat_ios), true);
642         return 0;
643 }
644 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
645 
646 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
647                                               struct blkg_policy_data *pd,
648                                               int off)
649 {
650         struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
651                                                               NULL, off);
652         return __blkg_prfill_rwstat(sf, pd, &rwstat);
653 }
654 
655 /**
656  * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
657  * @sf: seq_file to print to
658  * @v: unused
659  */
660 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
661 {
662         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
663                           blkg_prfill_rwstat_field_recursive,
664                           (void *)seq_cft(sf)->private,
665                           offsetof(struct blkcg_gq, stat_bytes), true);
666         return 0;
667 }
668 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
669 
670 /**
671  * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
672  * @sf: seq_file to print to
673  * @v: unused
674  */
675 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
676 {
677         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
678                           blkg_prfill_rwstat_field_recursive,
679                           (void *)seq_cft(sf)->private,
680                           offsetof(struct blkcg_gq, stat_ios), true);
681         return 0;
682 }
683 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
684 
685 /**
686  * blkg_stat_recursive_sum - collect hierarchical blkg_stat
687  * @blkg: blkg of interest
688  * @pol: blkcg_policy which contains the blkg_stat
689  * @off: offset to the blkg_stat in blkg_policy_data or @blkg
690  *
691  * Collect the blkg_stat specified by @blkg, @pol and @off and all its
692  * online descendants and their aux counts.  The caller must be holding the
693  * queue lock for online tests.
694  *
695  * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
696  * at @off bytes into @blkg's blkg_policy_data of the policy.
697  */
698 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
699                             struct blkcg_policy *pol, int off)
700 {
701         struct blkcg_gq *pos_blkg;
702         struct cgroup_subsys_state *pos_css;
703         u64 sum = 0;
704 
705         lockdep_assert_held(blkg->q->queue_lock);
706 
707         rcu_read_lock();
708         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
709                 struct blkg_stat *stat;
710 
711                 if (!pos_blkg->online)
712                         continue;
713 
714                 if (pol)
715                         stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
716                 else
717                         stat = (void *)blkg + off;
718 
719                 sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
720         }
721         rcu_read_unlock();
722 
723         return sum;
724 }
725 EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
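
/*
 * Illustrative sketch (not in the original file): a policy can call
 * blkg_stat_recursive_sum() from its own prfill callback to print a
 * hierarchical total for a blkg_stat embedded in its policy data.  The
 * example_policy object is the hypothetical one from the earlier sketch.
 */
static u64 example_prfill_stat_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd->blkg, &example_policy, off);

	return __blkg_prfill_u64(sf, pd, sum);
}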
726 
727 /**
728  * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
729  * @blkg: blkg of interest
730  * @pol: blkcg_policy which contains the blkg_rwstat
731  * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
732  *
733  * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
734  * online descendants and their aux counts.  The caller must be holding the
735  * queue lock for online tests.
736  *
737  * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
738  * is at @off bytes into @blkg's blkg_policy_data of the policy.
739  */
740 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
741                                              struct blkcg_policy *pol, int off)
742 {
743         struct blkcg_gq *pos_blkg;
744         struct cgroup_subsys_state *pos_css;
745         struct blkg_rwstat sum = { };
746         int i;
747 
748         lockdep_assert_held(blkg->q->queue_lock);
749 
750         rcu_read_lock();
751         blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
752                 struct blkg_rwstat *rwstat;
753 
754                 if (!pos_blkg->online)
755                         continue;
756 
757                 if (pol)
758                         rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
759                 else
760                         rwstat = (void *)pos_blkg + off;
761 
762                 for (i = 0; i < BLKG_RWSTAT_NR; i++)
763                         atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
764                                 percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
765                                 &sum.aux_cnt[i]);
766         }
767         rcu_read_unlock();
768 
769         return sum;
770 }
771 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
772 
773 /**
774  * blkg_conf_prep - parse and prepare for per-blkg config update
775  * @blkcg: target block cgroup
776  * @pol: target policy
777  * @input: input string
778  * @ctx: blkg_conf_ctx to be filled
779  *
780  * Parse per-blkg config update from @input and initialize @ctx with the
781  * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
782  * part of @input following MAJ:MIN.  This function returns with RCU read
783  * lock and queue lock held and must be paired with blkg_conf_finish().
784  */
785 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
786                    char *input, struct blkg_conf_ctx *ctx)
787         __acquires(rcu) __acquires(disk->queue->queue_lock)
788 {
789         struct gendisk *disk;
790         struct blkcg_gq *blkg;
791         unsigned int major, minor;
792         int key_len, part, ret;
793         char *body;
794 
795         if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
796                 return -EINVAL;
797 
798         body = input + key_len;
799         if (!isspace(*body))
800                 return -EINVAL;
801         body = skip_spaces(body);
802 
803         disk = get_gendisk(MKDEV(major, minor), &part);
804         if (!disk)
805                 return -ENODEV;
806         if (part) {
807                 put_disk(disk);
808                 return -ENODEV;
809         }
810 
811         rcu_read_lock();
812         spin_lock_irq(disk->queue->queue_lock);
813 
814         if (blkcg_policy_enabled(disk->queue, pol))
815                 blkg = blkg_lookup_create(blkcg, disk->queue);
816         else
817                 blkg = ERR_PTR(-EOPNOTSUPP);
818 
819         if (IS_ERR(blkg)) {
820                 ret = PTR_ERR(blkg);
821                 rcu_read_unlock();
822                 spin_unlock_irq(disk->queue->queue_lock);
823                 put_disk(disk);
824                 /*
825                  * If queue was bypassing, we should retry.  Do so after a
826                  * short msleep().  It isn't strictly necessary but queue
827                  * can be bypassing for some time and it's always nice to
828                  * avoid busy looping.
829                  */
830                 if (ret == -EBUSY) {
831                         msleep(10);
832                         ret = restart_syscall();
833                 }
834                 return ret;
835         }
836 
837         ctx->disk = disk;
838         ctx->blkg = blkg;
839         ctx->body = body;
840         return 0;
841 }
842 EXPORT_SYMBOL_GPL(blkg_conf_prep);
843 
844 /**
845  * blkg_conf_finish - finish up per-blkg config update
 846  * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
847  *
848  * Finish up after per-blkg config update.  This function must be paired
849  * with blkg_conf_prep().
850  */
851 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
852         __releases(ctx->disk->queue->queue_lock) __releases(rcu)
853 {
854         spin_unlock_irq(ctx->disk->queue->queue_lock);
855         rcu_read_unlock();
856         put_disk(ctx->disk);
857 }
858 EXPORT_SYMBOL_GPL(blkg_conf_finish);
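
/*
 * Illustrative sketch (not in the original file): a policy's cftype write
 * handler typically brackets its "MAJ:MIN <value>" parsing with
 * blkg_conf_prep() and blkg_conf_finish().  example_conf_write() and the
 * way @val is applied are hypothetical; example_policy is the placeholder
 * policy from the earlier sketches.
 */
static ssize_t example_conf_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 val;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &val) == 1) {
		/* apply @val to ctx.blkg's policy data here */
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}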
859 
860 static int blkcg_print_stat(struct seq_file *sf, void *v)
861 {
862         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
863         struct blkcg_gq *blkg;
864 
865         rcu_read_lock();
866 
867         hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
868                 const char *dname;
869                 struct blkg_rwstat rwstat;
870                 u64 rbytes, wbytes, rios, wios;
871 
872                 dname = blkg_dev_name(blkg);
873                 if (!dname)
874                         continue;
875 
876                 spin_lock_irq(blkg->q->queue_lock);
877 
878                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
879                                         offsetof(struct blkcg_gq, stat_bytes));
880                 rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
881                 wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
882 
883                 rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
884                                         offsetof(struct blkcg_gq, stat_ios));
885                 rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
886                 wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
887 
888                 spin_unlock_irq(blkg->q->queue_lock);
889 
890                 if (rbytes || wbytes || rios || wios)
891                         seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
892                                    dname, rbytes, wbytes, rios, wios);
893         }
894 
895         rcu_read_unlock();
896         return 0;
897 }
898 
899 struct cftype blkcg_files[] = {
900         {
901                 .name = "stat",
902                 .seq_show = blkcg_print_stat,
903         },
904         { }     /* terminate */
905 };
906 
907 struct cftype blkcg_legacy_files[] = {
908         {
909                 .name = "reset_stats",
910                 .write_u64 = blkcg_reset_stats,
911         },
912         { }     /* terminate */
913 };
914 
915 /**
916  * blkcg_css_offline - cgroup css_offline callback
917  * @css: css of interest
918  *
919  * This function is called when @css is about to go away and responsible
920  * for shooting down all blkgs associated with @css.  blkgs should be
921  * removed while holding both q and blkcg locks.  As blkcg lock is nested
922  * inside q lock, this function performs reverse double lock dancing.
923  *
924  * This is the blkcg counterpart of ioc_release_fn().
925  */
926 static void blkcg_css_offline(struct cgroup_subsys_state *css)
927 {
928         struct blkcg *blkcg = css_to_blkcg(css);
929 
930         spin_lock_irq(&blkcg->lock);
931 
932         while (!hlist_empty(&blkcg->blkg_list)) {
933                 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
934                                                 struct blkcg_gq, blkcg_node);
935                 struct request_queue *q = blkg->q;
936 
937                 if (spin_trylock(q->queue_lock)) {
938                         blkg_destroy(blkg);
939                         spin_unlock(q->queue_lock);
940                 } else {
941                         spin_unlock_irq(&blkcg->lock);
942                         cpu_relax();
943                         spin_lock_irq(&blkcg->lock);
944                 }
945         }
946 
947         spin_unlock_irq(&blkcg->lock);
948 
949         wb_blkcg_offline(blkcg);
950 }
951 
952 static void blkcg_css_free(struct cgroup_subsys_state *css)
953 {
954         struct blkcg *blkcg = css_to_blkcg(css);
955         int i;
956 
957         mutex_lock(&blkcg_pol_mutex);
958 
959         list_del(&blkcg->all_blkcgs_node);
960 
961         for (i = 0; i < BLKCG_MAX_POLS; i++)
962                 if (blkcg->cpd[i])
963                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
964 
965         mutex_unlock(&blkcg_pol_mutex);
966 
967         kfree(blkcg);
968 }
969 
970 static struct cgroup_subsys_state *
971 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
972 {
973         struct blkcg *blkcg;
974         struct cgroup_subsys_state *ret;
975         int i;
976 
977         mutex_lock(&blkcg_pol_mutex);
978 
979         if (!parent_css) {
980                 blkcg = &blkcg_root;
981         } else {
982                 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
983                 if (!blkcg) {
984                         ret = ERR_PTR(-ENOMEM);
985                         goto free_blkcg;
986                 }
987         }
988 
989         for (i = 0; i < BLKCG_MAX_POLS ; i++) {
990                 struct blkcg_policy *pol = blkcg_policy[i];
991                 struct blkcg_policy_data *cpd;
992 
993                 /*
 994                  * If the policy hasn't been registered yet, skip it; its
 995                  * per-cgroup data will be allocated when it registers. Otherwise,
996                  * check if the policy requires any specific per-cgroup
997                  * data: if it does, allocate and initialize it.
998                  */
999                 if (!pol || !pol->cpd_alloc_fn)
1000                         continue;
1001 
1002                 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1003                 if (!cpd) {
1004                         ret = ERR_PTR(-ENOMEM);
1005                         goto free_pd_blkcg;
1006                 }
1007                 blkcg->cpd[i] = cpd;
1008                 cpd->blkcg = blkcg;
1009                 cpd->plid = i;
1010                 if (pol->cpd_init_fn)
1011                         pol->cpd_init_fn(cpd);
1012         }
1013 
1014         spin_lock_init(&blkcg->lock);
1015         INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
1016         INIT_HLIST_HEAD(&blkcg->blkg_list);
1017 #ifdef CONFIG_CGROUP_WRITEBACK
1018         INIT_LIST_HEAD(&blkcg->cgwb_list);
1019 #endif
1020         list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1021 
1022         mutex_unlock(&blkcg_pol_mutex);
1023         return &blkcg->css;
1024 
1025 free_pd_blkcg:
1026         for (i--; i >= 0; i--)
1027                 if (blkcg->cpd[i])
1028                         blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1029 free_blkcg:
1030         kfree(blkcg);
1031         mutex_unlock(&blkcg_pol_mutex);
1032         return ret;
1033 }
1034 
1035 /**
1036  * blkcg_init_queue - initialize blkcg part of request queue
1037  * @q: request_queue to initialize
1038  *
1039  * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1040  * part of new request_queue @q.
1041  *
1042  * RETURNS:
1043  * 0 on success, -errno on failure.
1044  */
1045 int blkcg_init_queue(struct request_queue *q)
1046 {
1047         struct blkcg_gq *new_blkg, *blkg;
1048         bool preloaded;
1049         int ret;
1050 
1051         new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1052         if (!new_blkg)
1053                 return -ENOMEM;
1054 
1055         preloaded = !radix_tree_preload(GFP_KERNEL);
1056 
1057         /*
1058          * Make sure the root blkg exists.  As
1059          * @q is bypassing at this point, blkg_lookup_create() can't be
1060          * used.  Open code insertion.
1061          */
1062         rcu_read_lock();
1063         spin_lock_irq(q->queue_lock);
1064         blkg = blkg_create(&blkcg_root, q, new_blkg);
1065         spin_unlock_irq(q->queue_lock);
1066         rcu_read_unlock();
1067 
1068         if (preloaded)
1069                 radix_tree_preload_end();
1070 
1071         if (IS_ERR(blkg)) {
1072                 blkg_free(new_blkg);
1073                 return PTR_ERR(blkg);
1074         }
1075 
1076         q->root_blkg = blkg;
1077         q->root_rl.blkg = blkg;
1078 
1079         ret = blk_throtl_init(q);
1080         if (ret) {
1081                 spin_lock_irq(q->queue_lock);
1082                 blkg_destroy_all(q);
1083                 spin_unlock_irq(q->queue_lock);
1084         }
1085         return ret;
1086 }
1087 
1088 /**
1089  * blkcg_drain_queue - drain blkcg part of request_queue
1090  * @q: request_queue to drain
1091  *
1092  * Called from blk_drain_queue().  Responsible for draining blkcg part.
1093  */
1094 void blkcg_drain_queue(struct request_queue *q)
1095 {
1096         lockdep_assert_held(q->queue_lock);
1097 
1098         /*
1099          * @q could be exiting and already have destroyed all blkgs as
1100          * indicated by NULL root_blkg.  If so, don't confuse policies.
1101          */
1102         if (!q->root_blkg)
1103                 return;
1104 
1105         blk_throtl_drain(q);
1106 }
1107 
1108 /**
1109  * blkcg_exit_queue - exit and release blkcg part of request_queue
1110  * @q: request_queue being released
1111  *
1112  * Called from blk_release_queue().  Responsible for exiting blkcg part.
1113  */
1114 void blkcg_exit_queue(struct request_queue *q)
1115 {
1116         spin_lock_irq(q->queue_lock);
1117         blkg_destroy_all(q);
1118         spin_unlock_irq(q->queue_lock);
1119 
1120         blk_throtl_exit(q);
1121 }
1122 
1123 /*
1124  * We cannot support shared io contexts, as we have no means to support
1125  * two tasks with the same ioc in two different groups without major rework
1126  * of the main cic data structures.  For now we allow a task to change
1127  * its cgroup only if it's the only owner of its ioc.
1128  */
1129 static int blkcg_can_attach(struct cgroup_subsys_state *css,
1130                             struct cgroup_taskset *tset)
1131 {
1132         struct task_struct *task;
1133         struct io_context *ioc;
1134         int ret = 0;
1135 
1136         /* task_lock() is needed to avoid races with exit_io_context() */
1137         cgroup_taskset_for_each(task, tset) {
1138                 task_lock(task);
1139                 ioc = task->io_context;
1140                 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1141                         ret = -EINVAL;
1142                 task_unlock(task);
1143                 if (ret)
1144                         break;
1145         }
1146         return ret;
1147 }
1148 
1149 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1150 {
1151         int i;
1152 
1153         mutex_lock(&blkcg_pol_mutex);
1154 
1155         for (i = 0; i < BLKCG_MAX_POLS; i++) {
1156                 struct blkcg_policy *pol = blkcg_policy[i];
1157                 struct blkcg *blkcg;
1158 
1159                 if (!pol || !pol->cpd_bind_fn)
1160                         continue;
1161 
1162                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1163                         if (blkcg->cpd[pol->plid])
1164                                 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1165         }
1166         mutex_unlock(&blkcg_pol_mutex);
1167 }
1168 
1169 struct cgroup_subsys io_cgrp_subsys = {
1170         .css_alloc = blkcg_css_alloc,
1171         .css_offline = blkcg_css_offline,
1172         .css_free = blkcg_css_free,
1173         .can_attach = blkcg_can_attach,
1174         .bind = blkcg_bind,
1175         .dfl_cftypes = blkcg_files,
1176         .legacy_cftypes = blkcg_legacy_files,
1177         .legacy_name = "blkio",
1178 #ifdef CONFIG_MEMCG
1179         /*
1180          * This ensures that, if available, memcg is automatically enabled
1181          * together on the default hierarchy so that the owner cgroup can
1182          * be retrieved from writeback pages.
1183          */
1184         .depends_on = 1 << memory_cgrp_id,
1185 #endif
1186 };
1187 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1188 
1189 /**
1190  * blkcg_activate_policy - activate a blkcg policy on a request_queue
1191  * @q: request_queue of interest
1192  * @pol: blkcg policy to activate
1193  *
1194  * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1195  * bypass mode to populate its blkgs with policy_data for @pol.
1196  *
1197  * Activation happens with @q bypassed, so nobody would be accessing blkgs
1198  * from IO path.  Update of each blkg is protected by both queue and blkcg
1199  * locks so that holding either lock and testing blkcg_policy_enabled() is
1200  * always enough for dereferencing policy data.
1201  *
1202  * The caller is responsible for synchronizing [de]activations and policy
1203  * [un]registrations.  Returns 0 on success, -errno on failure.
1204  */
1205 int blkcg_activate_policy(struct request_queue *q,
1206                           const struct blkcg_policy *pol)
1207 {
1208         struct blkg_policy_data *pd_prealloc = NULL;
1209         struct blkcg_gq *blkg;
1210         int ret;
1211 
1212         if (blkcg_policy_enabled(q, pol))
1213                 return 0;
1214 
1215         blk_queue_bypass_start(q);
1216 pd_prealloc:
1217         if (!pd_prealloc) {
1218                 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1219                 if (!pd_prealloc) {
1220                         ret = -ENOMEM;
1221                         goto out_bypass_end;
1222                 }
1223         }
1224 
1225         spin_lock_irq(q->queue_lock);
1226 
1227         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1228                 struct blkg_policy_data *pd;
1229 
1230                 if (blkg->pd[pol->plid])
1231                         continue;
1232 
1233                 pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
1234                 if (!pd)
1235                         swap(pd, pd_prealloc);
1236                 if (!pd) {
1237                         spin_unlock_irq(q->queue_lock);
1238                         goto pd_prealloc;
1239                 }
1240 
1241                 blkg->pd[pol->plid] = pd;
1242                 pd->blkg = blkg;
1243                 pd->plid = pol->plid;
1244                 if (pol->pd_init_fn)
1245                         pol->pd_init_fn(pd);
1246         }
1247 
1248         __set_bit(pol->plid, q->blkcg_pols);
1249         ret = 0;
1250 
1251         spin_unlock_irq(q->queue_lock);
1252 out_bypass_end:
1253         blk_queue_bypass_end(q);
1254         if (pd_prealloc)
1255                 pol->pd_free_fn(pd_prealloc);
1256         return ret;
1257 }
1258 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1259 
1260 /**
1261  * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1262  * @q: request_queue of interest
1263  * @pol: blkcg policy to deactivate
1264  *
1265  * Deactivate @pol on @q.  Follows the same synchronization rules as
1266  * blkcg_activate_policy().
1267  */
1268 void blkcg_deactivate_policy(struct request_queue *q,
1269                              const struct blkcg_policy *pol)
1270 {
1271         struct blkcg_gq *blkg;
1272 
1273         if (!blkcg_policy_enabled(q, pol))
1274                 return;
1275 
1276         blk_queue_bypass_start(q);
1277         spin_lock_irq(q->queue_lock);
1278 
1279         __clear_bit(pol->plid, q->blkcg_pols);
1280 
1281         list_for_each_entry(blkg, &q->blkg_list, q_node) {
1282                 /* grab blkcg lock too while removing @pd from @blkg */
1283                 spin_lock(&blkg->blkcg->lock);
1284 
1285                 if (blkg->pd[pol->plid]) {
1286                         if (pol->pd_offline_fn)
1287                                 pol->pd_offline_fn(blkg->pd[pol->plid]);
1288                         pol->pd_free_fn(blkg->pd[pol->plid]);
1289                         blkg->pd[pol->plid] = NULL;
1290                 }
1291 
1292                 spin_unlock(&blkg->blkcg->lock);
1293         }
1294 
1295         spin_unlock_irq(q->queue_lock);
1296         blk_queue_bypass_end(q);
1297 }
1298 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
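
/*
 * Illustrative sketch (not in the original file): a policy typically
 * activates itself on a queue when it attaches (e.g. from an elevator's
 * init path) and deactivates on teardown.  The example_* helpers and
 * example_policy are hypothetical.
 */
static int example_attach_to_queue(struct request_queue *q)
{
	/* after this, every blkg on @q carries pd[example_policy.plid] */
	return blkcg_activate_policy(q, &example_policy);
}

static void example_detach_from_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_policy);
}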
1299 
1300 /**
1301  * blkcg_policy_register - register a blkcg policy
1302  * @pol: blkcg policy to register
1303  *
1304  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1305  * successful registration.  Returns 0 on success and -errno on failure.
1306  */
1307 int blkcg_policy_register(struct blkcg_policy *pol)
1308 {
1309         struct blkcg *blkcg;
1310         int i, ret;
1311 
1312         mutex_lock(&blkcg_pol_register_mutex);
1313         mutex_lock(&blkcg_pol_mutex);
1314 
1315         /* find an empty slot */
1316         ret = -ENOSPC;
1317         for (i = 0; i < BLKCG_MAX_POLS; i++)
1318                 if (!blkcg_policy[i])
1319                         break;
1320         if (i >= BLKCG_MAX_POLS)
1321                 goto err_unlock;
1322 
1323         /* register @pol */
1324         pol->plid = i;
1325         blkcg_policy[pol->plid] = pol;
1326 
1327         /* allocate and install cpd's */
1328         if (pol->cpd_alloc_fn) {
1329                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1330                         struct blkcg_policy_data *cpd;
1331 
1332                         cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1333                         if (!cpd) {
1334                                 mutex_unlock(&blkcg_pol_mutex);
1335                                 goto err_free_cpds;
1336                         }
1337 
1338                         blkcg->cpd[pol->plid] = cpd;
1339                         cpd->blkcg = blkcg;
1340                         cpd->plid = pol->plid;
1341                         pol->cpd_init_fn(cpd);
1342                 }
1343         }
1344 
1345         mutex_unlock(&blkcg_pol_mutex);
1346 
1347         /* everything is in place, add intf files for the new policy */
1348         if (pol->dfl_cftypes)
1349                 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1350                                                pol->dfl_cftypes));
1351         if (pol->legacy_cftypes)
1352                 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1353                                                   pol->legacy_cftypes));
1354         mutex_unlock(&blkcg_pol_register_mutex);
1355         return 0;
1356 
1357 err_free_cpds:
1358         if (pol->cpd_alloc_fn) {
1359                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1360                         if (blkcg->cpd[pol->plid]) {
1361                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1362                                 blkcg->cpd[pol->plid] = NULL;
1363                         }
1364                 }
1365         }
1366         blkcg_policy[pol->plid] = NULL;
1367 err_unlock:
1368         mutex_unlock(&blkcg_pol_mutex);
1369         mutex_unlock(&blkcg_pol_register_mutex);
1370         return ret;
1371 }
1372 EXPORT_SYMBOL_GPL(blkcg_policy_register);
1373 
1374 /**
1375  * blkcg_policy_unregister - unregister a blkcg policy
1376  * @pol: blkcg policy to unregister
1377  *
1378  * Undo blkcg_policy_register(@pol).  Might sleep.
1379  */
1380 void blkcg_policy_unregister(struct blkcg_policy *pol)
1381 {
1382         struct blkcg *blkcg;
1383 
1384         mutex_lock(&blkcg_pol_register_mutex);
1385 
1386         if (WARN_ON(blkcg_policy[pol->plid] != pol))
1387                 goto out_unlock;
1388 
1389         /* kill the intf files first */
1390         if (pol->dfl_cftypes)
1391                 cgroup_rm_cftypes(pol->dfl_cftypes);
1392         if (pol->legacy_cftypes)
1393                 cgroup_rm_cftypes(pol->legacy_cftypes);
1394 
1395         /* remove cpds and unregister */
1396         mutex_lock(&blkcg_pol_mutex);
1397 
1398         if (pol->cpd_alloc_fn) {
1399                 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1400                         if (blkcg->cpd[pol->plid]) {
1401                                 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1402                                 blkcg->cpd[pol->plid] = NULL;
1403                         }
1404                 }
1405         }
1406         blkcg_policy[pol->plid] = NULL;
1407 
1408         mutex_unlock(&blkcg_pol_mutex);
1409 out_unlock:
1410         mutex_unlock(&blkcg_pol_register_mutex);
1411 }
1412 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
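
/*
 * Illustrative sketch (not in the original file): a minimal policy that
 * registers itself at module init time and unregisters on exit.  All
 * example_* names are hypothetical; a real policy would also provide
 * cftypes and pd_init/pd_offline callbacks as needed.
 */
static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
{
	return kzalloc_node(sizeof(struct blkg_policy_data), gfp, node);
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd);
}

static struct blkcg_policy example_policy = {
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};

static int __init example_policy_init(void)
{
	return blkcg_policy_register(&example_policy);
}

static void __exit example_policy_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}
module_init(example_policy_init);
module_exit(example_policy_exit);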
1413 
