/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones.  Used to carry stats of dead children, and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
        struct percpu_counter           cpu_cnt;
        atomic64_t                      aux_cnt;
};

struct blkg_rwstat {
        struct percpu_counter           cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init_fn() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};
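
/*
 * Illustrative sketch (not part of this header): a policy typically embeds
 * blkg_policy_data at the start of its own per-blkg structure, exactly as
 * the comment above describes, and converts back with container_of().  The
 * names struct hypo_blkg_data and hypo_pd_to_data() are hypothetical.
 *
 *      struct hypo_blkg_data {
 *              struct blkg_policy_data pd;     // must come first
 *              u64 nr_dispatched;              // policy-private fields follow
 *      };
 *
 *      static inline struct hypo_blkg_data *
 *      hypo_pd_to_data(struct blkg_policy_data *pd)
 *      {
 *              return pd ? container_of(pd, struct hypo_blkg_data, pd) : NULL;
 *      }
 */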

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested  *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list             rl;

        /* reference count */
        atomic_t                        refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_rwstat              stat_bytes;
        struct blkg_rwstat              stat_ios;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
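
/*
 * Illustrative sketch (not part of this header): a minimal policy
 * registration.  hypo_policy and the hypo_* callbacks are hypothetical;
 * a real policy also fills in the cftypes and any cpd callbacks it needs.
 *
 *      static struct blkcg_policy hypo_policy = {
 *              .pd_alloc_fn    = hypo_pd_alloc,
 *              .pd_init_fn     = hypo_pd_init,
 *              .pd_free_fn     = hypo_pd_free,
 *      };
 *
 *      // at module init: make the policy known globally
 *      ret = blkcg_policy_register(&hypo_policy);
 *
 *      // per queue: allocate pd's for all existing blkgs of @q
 *      ret = blkcg_activate_policy(q, &hypo_policy);
 */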

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
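
/*
 * Illustrative sketch (not part of this header): the usual shape of a
 * cgroup file write handler built around blkg_conf_prep()/finish().
 * hypo_policy and hypo_parse_and_apply() are hypothetical.
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &hypo_policy, buf, &ctx);
 *      if (ret)
 *              return ret;
 *
 *      // ctx.blkg is the target blkg, ctx.body the rest of the input
 *      ret = hypo_parse_and_apply(ctx.blkg, ctx.body);
 *
 *      blkg_conf_finish(&ctx);         // drops refs/locks taken by prep
 *      return ret;
 */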

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        struct cgroup_subsys_state *css;

        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        css = kthread_blkcg();
        if (css)
                return css_to_blkcg(css);
        return css_to_blkcg(task_css(current, io_cgrp_id));
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Look up the blkg for the @blkcg - @q pair.  This function should be
 * called under the RCU read lock and is guaranteed to return %NULL if @q
 * is bypassing - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}

/**
 * blkg_root_lookup - look up blkg for the specified request queue
 * @q: request_queue of interest
 *
 * Look up the blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blkg_root_lookup(struct request_queue *q)
{
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkg = blkg_lookup(&blkcg_root, q);
        rcu_read_unlock();

        return blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}
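
/*
 * Illustrative sketch (not part of this header): inside a policy callback
 * the pd for "this" policy is usually fetched via blkg_to_pd() and then
 * converted to the embedding structure.  hypo_policy, struct hypo_blkg_data
 * and hypo_pd_to_data() are hypothetical (see the embedding sketch above).
 *
 *      static void hypo_pd_init(struct blkg_policy_data *pd)
 *      {
 *              struct hypo_blkg_data *hd = hypo_pd_to_data(pd);
 *
 *              hd->nr_dispatched = 0;
 *      }
 *
 *      // elsewhere, given a blkg:
 *      struct hypo_blkg_data *hd =
 *              hypo_pd_to_data(blkg_to_pd(blkg, &hypo_policy));
 */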

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
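
/*
 * Illustrative sketch (not part of this header): blkg_lookup() itself takes
 * no reference, so a blkg that must outlive the RCU read section is pinned
 * with blkg_get() and later released with blkg_put() (compare blk_get_rl()
 * below, which follows the same pattern).
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg)
 *              blkg_get(blkg);         // pin before leaving the RCU section
 *      rcu_read_unlock();
 *
 *      // ... use blkg ...
 *
 *      if (blkg)
 *              blkg_put(blkg);         // may free via call_rcu()
 */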

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)           \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
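
/*
 * Illustrative sketch (not part of this header): walking all descendant
 * blkgs of p_blkg under the RCU read lock.  hypo_update_one() is a
 * hypothetical per-blkg operation.
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *d_blkg;
 *
 *      rcu_read_lock();
 *      blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) {
 *              // p_blkg itself is visited first, then its descendants
 *              hypo_update_one(d_blkg);
 *      }
 *      rcu_read_unlock();
 */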

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}
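
/*
 * Illustrative sketch (not part of this header): the get/set/put pairing
 * as the legacy request allocation path might use it, with queue_lock
 * held.  hypo_alloc_request() is a hypothetical allocation step.
 *
 *      rl = blk_get_rl(q, bio);        // never NULL; falls back to root_rl
 *      rq = hypo_alloc_request(rl);
 *      if (!rq) {
 *              blk_put_rl(rl);
 *              return NULL;
 *      }
 *      blk_rq_set_rl(rq, rl);          // so freeing knows where rq came from
 */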

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list loop cursor
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}
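
/*
 * Illustrative sketch (not part of this header): the typical blkg_stat
 * lifecycle inside a policy's pd callbacks.  The callback placement named
 * in the comments is the usual convention, not a requirement.
 *
 *      struct blkg_stat st;
 *
 *      if (blkg_stat_init(&st, GFP_KERNEL))   // pd_alloc time
 *              return NULL;
 *
 *      blkg_stat_add(&st, 1);                 // hot path, per-cpu cheap
 *      total = blkg_stat_read(&st);           // slow path, sums all CPUs
 *
 *      blkg_stat_exit(&st);                   // pd_free time
 */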

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   unsigned int op, uint64_t val)
{
        struct percpu_counter *cnt;

        if (op_is_write(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

        if (op_is_sync(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
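
/*
 * Illustrative sketch (not part of this header): each call bumps exactly
 * two counters - READ or WRITE, plus SYNC or ASYNC - based on the op and
 * flags.
 *
 *      // account a 4KiB synchronous write: bumps WRITE and SYNC by 4096
 *      blkg_rwstat_add(&blkg->stat_bytes, REQ_OP_WRITE | REQ_SYNC, 4096);
 *
 *      // account one async write completion: bumps WRITE and ASYNC by 1
 *      blkg_rwstat_add(&blkg->stat_ios, REQ_OP_WRITE, 1);
 */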

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat result;
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&result.aux_cnt[i],
                             percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
        return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        u64 sum[BLKG_RWSTAT_NR];
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* associate blkcg if bio hasn't attached one */
        bio_associate_blkcg(bio, &blkcg->css);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
                                bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
        }

        rcu_read_unlock();
        return !throtl;
}
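
/*
 * Illustrative sketch (not part of this header): the bio submission path
 * gates dispatch on this check; a %false return means the throttler has
 * consumed the bio and it must not be issued now.  In kernels of this era
 * the caller is generic_make_request_checks() in block/blk-core.c.
 *
 *      if (!blkcg_bio_issue_check(q, bio))
 *              return false;   // throttled; re-issued by the throttler later
 */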

#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q) { return NULL; }
static inline struct blkcg_gq *blkg_root_lookup(struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */
