TOMOYO Linux Cross Reference
Linux/block/blk-throttle.c


  1 /*
  2  * Interface for controlling IO bandwidth on a request queue
  3  *
  4  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
  5  */
  6 
  7 #include <linux/module.h>
  8 #include <linux/slab.h>
  9 #include <linux/blkdev.h>
 10 #include <linux/bio.h>
 11 #include <linux/blktrace_api.h>
 12 #include <linux/blk-cgroup.h>
 13 #include "blk.h"
 14 
 15 /* Max dispatch from a group in 1 round */
 16 static int throtl_grp_quantum = 8;
 17 
 18 /* Total max dispatch from all groups in one round */
 19 static int throtl_quantum = 32;
 20 
 21 /* Throttling is performed over a 100ms slice, after which the slice is renewed */
 22 static unsigned long throtl_slice = HZ/10;      /* 100 ms */
 23 
 24 static struct blkcg_policy blkcg_policy_throtl;
 25 
 26 /* A workqueue to queue throttle related work */
 27 static struct workqueue_struct *kthrotld_workqueue;
 28 
 29 /*
 30  * To implement hierarchical throttling, throtl_grps form a tree and bios
 31  * are dispatched upwards level by level until they reach the top and get
 32  * issued.  When dispatching bios from the children and local group at each
 33  * level, if the bios are dispatched into a single bio_list, there's a risk
 34  * that a local or child group which can queue many bios at once fills up
 35  * the list and starves the others.
 36  *
 37  * To avoid such starvation, dispatched bios are queued separately
 38  * according to where they came from.  When they are again dispatched to
 39  * the parent, they're popped in round-robin order so that no single source
 40  * hogs the dispatch window.
 41  *
 42  * throtl_qnode is used to keep the queued bios separated by their sources.
 43  * Bios are queued to throtl_qnode which in turn is queued to
 44  * throtl_service_queue and then dispatched in round-robin order.
 45  *
 46  * It's also used to track the reference counts on blkg's.  A qnode always
 47  * belongs to a throtl_grp and gets queued on itself or the parent, so
 48  * incrementing the reference of the associated throtl_grp when a qnode is
 49  * queued and decrementing when dequeued is enough to keep the whole blkg
 50  * tree pinned while bios are in flight.
 51  */
 52 struct throtl_qnode {
 53         struct list_head        node;           /* service_queue->queued[] */
 54         struct bio_list         bios;           /* queued bios */
 55         struct throtl_grp       *tg;            /* tg this qnode belongs to */
 56 };
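
/*
 * Editorial worked example (not part of the kernel source): suppose a
 * service_queue's queued[READ] list holds two qnodes, one from child A
 * with bios a1, a2, a3 and one from child B with b1.  Because
 * throtl_pop_queued() below pops one bio and then rotates a still
 * non-empty qnode to the tail of the list, the bios reach the parent in
 * the order a1, b1, a2, a3 rather than a1, a2, a3, b1, so a busy source
 * cannot starve a quiet one.
 */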
 57 
 58 struct throtl_service_queue {
 59         struct throtl_service_queue *parent_sq; /* the parent service_queue */
 60 
 61         /*
 62          * Bios queued directly to this service_queue or dispatched from
 63          * children throtl_grp's.
 64          */
 65         struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
 66         unsigned int            nr_queued[2];   /* number of queued bios */
 67 
 68         /*
 69          * RB tree of active children throtl_grp's, which are sorted by
 70          * their ->disptime.
 71          */
 72         struct rb_root          pending_tree;   /* RB tree of active tgs */
 73         struct rb_node          *first_pending; /* first node in the tree */
 74         unsigned int            nr_pending;     /* # queued in the tree */
 75         unsigned long           first_pending_disptime; /* disptime of the first tg */
 76         struct timer_list       pending_timer;  /* fires on first_pending_disptime */
 77 };
 78 
 79 enum tg_state_flags {
 80         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
 81         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* queued[] became non-empty */
 82 };
 83 
 84 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
 85 
 86 struct throtl_grp {
 87         /* must be the first member */
 88         struct blkg_policy_data pd;
 89 
 90         /* active throtl group service_queue member */
 91         struct rb_node rb_node;
 92 
 93         /* throtl_data this group belongs to */
 94         struct throtl_data *td;
 95 
 96         /* this group's service queue */
 97         struct throtl_service_queue service_queue;
 98 
 99         /*
100          * qnode_on_self is used when bios are directly queued to this
101          * throtl_grp so that local bios compete fairly with bios
102          * dispatched from children.  qnode_on_parent is used when bios are
103          * dispatched from this throtl_grp into its parent and will compete
104          * with the sibling qnode_on_parents and the parent's
105          * qnode_on_self.
106          */
107         struct throtl_qnode qnode_on_self[2];
108         struct throtl_qnode qnode_on_parent[2];
109 
110         /*
111          * Dispatch time in jiffies. This is the estimated time when the group
112          * will unthrottle and be ready to dispatch more bios. It is used as
113          * the key to sort active groups in the service tree.
114          */
115         unsigned long disptime;
116 
117         unsigned int flags;
118 
119         /* are there any throtl rules between this group and td? */
120         bool has_rules[2];
121 
122         /* bytes per second rate limits */
123         uint64_t bps[2];
124 
125         /* IOPS limits */
126         unsigned int iops[2];
127 
128         /* Number of bytes dispatched in current slice */
129         uint64_t bytes_disp[2];
130         /* Number of bio's dispatched in current slice */
131         unsigned int io_disp[2];
132 
133         /* When did we start a new slice */
134         unsigned long slice_start[2];
135         unsigned long slice_end[2];
136 };
137 
138 struct throtl_data
139 {
140         /* service tree for active throtl groups */
141         struct throtl_service_queue service_queue;
142 
143         struct request_queue *queue;
144 
145         /* Total number of queued bios on READ and WRITE lists */
146         unsigned int nr_queued[2];
147 
148         /*
149          * total number of undestroyed groups
150          */
151         unsigned int nr_undestroyed_grps;
152 
153         /* Work for dispatching throttled bios */
154         struct work_struct dispatch_work;
155 };
156 
157 static void throtl_pending_timer_fn(unsigned long arg);
158 
159 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
160 {
161         return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
162 }
163 
164 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
165 {
166         return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
167 }
168 
169 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
170 {
171         return pd_to_blkg(&tg->pd);
172 }
173 
174 /**
175  * sq_to_tg - return the throtl_grp the specified service queue belongs to
176  * @sq: the throtl_service_queue of interest
177  *
178  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
179  * embedded in throtl_data, %NULL is returned.
180  */
181 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
182 {
183         if (sq && sq->parent_sq)
184                 return container_of(sq, struct throtl_grp, service_queue);
185         else
186                 return NULL;
187 }
188 
189 /**
190  * sq_to_td - return throtl_data the specified service queue belongs to
191  * @sq: the throtl_service_queue of interest
192  *
193  * A service_queue can be embedded in either a throtl_grp or throtl_data.
194  * Determine the associated throtl_data accordingly and return it.
195  */
196 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
197 {
198         struct throtl_grp *tg = sq_to_tg(sq);
199 
200         if (tg)
201                 return tg->td;
202         else
203                 return container_of(sq, struct throtl_data, service_queue);
204 }
205 
206 /**
207  * throtl_log - log debug message via blktrace
208  * @sq: the service_queue being reported
209  * @fmt: printf format string
210  * @args: printf args
211  *
212  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
213  * throtl_grp; otherwise, just "throtl".
214  *
215  * TODO: this should be made a function and name formatting should happen
216  * after testing whether blktrace is enabled.
217  */
218 #define throtl_log(sq, fmt, args...)    do {                            \
219         struct throtl_grp *__tg = sq_to_tg((sq));                       \
220         struct throtl_data *__td = sq_to_td((sq));                      \
221                                                                         \
222         (void)__td;                                                     \
223         if ((__tg)) {                                                   \
224                 char __pbuf[128];                                       \
225                                                                         \
226                 blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
227                 blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
228         } else {                                                        \
229                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
230         }                                                               \
231 } while (0)
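
/*
 * Editorial sketch of the TODO above (not part of this tree): the
 * blkg_path() formatting could be skipped unless blktrace is actually
 * listening.  This assumes a helper along the lines of
 * blk_trace_note_message_enabled(), which later kernels provide; it is
 * shown only as an illustration of the idea, not as the code that is
 * built here:
 *
 *	#define throtl_log(sq, fmt, args...)	do {			\
 *		struct throtl_grp *__tg = sq_to_tg((sq));		\
 *		struct throtl_data *__td = sq_to_td((sq));		\
 *									\
 *		if (likely(!blk_trace_note_message_enabled(__td->queue))) \
 *			break;						\
 *		... format and emit as above ...			\
 *	} while (0)
 */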
232 
233 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
234 {
235         INIT_LIST_HEAD(&qn->node);
236         bio_list_init(&qn->bios);
237         qn->tg = tg;
238 }
239 
240 /**
241  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
242  * @bio: bio being added
243  * @qn: qnode to add bio to
244  * @queued: the service_queue->queued[] list @qn belongs to
245  *
246  * Add @bio to @qn and put @qn on @queued if it's not already on.
247  * @qn->tg's reference count is bumped when @qn is activated.  See the
248  * comment on top of throtl_qnode definition for details.
249  */
250 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
251                                  struct list_head *queued)
252 {
253         bio_list_add(&qn->bios, bio);
254         if (list_empty(&qn->node)) {
255                 list_add_tail(&qn->node, queued);
256                 blkg_get(tg_to_blkg(qn->tg));
257         }
258 }
259 
260 /**
261  * throtl_peek_queued - peek the first bio on a qnode list
262  * @queued: the qnode list to peek
263  */
264 static struct bio *throtl_peek_queued(struct list_head *queued)
265 {
266         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
267         struct bio *bio;
268 
269         if (list_empty(queued))
270                 return NULL;
271 
272         bio = bio_list_peek(&qn->bios);
273         WARN_ON_ONCE(!bio);
274         return bio;
275 }
276 
277 /**
278  * throtl_pop_queued - pop the first bio from a qnode list
279  * @queued: the qnode list to pop a bio from
280  * @tg_to_put: optional out argument for throtl_grp to put
281  *
282  * Pop the first bio from the qnode list @queued.  After popping, the first
283  * qnode is removed from @queued if empty or moved to the end of @queued so
284  * that the popping order is round-robin.
285  *
286  * When the first qnode is removed, its associated throtl_grp should be put
287  * too.  If @tg_to_put is NULL, this function automatically puts it;
288  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
289  * responsible for putting it.
290  */
291 static struct bio *throtl_pop_queued(struct list_head *queued,
292                                      struct throtl_grp **tg_to_put)
293 {
294         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
295         struct bio *bio;
296 
297         if (list_empty(queued))
298                 return NULL;
299 
300         bio = bio_list_pop(&qn->bios);
301         WARN_ON_ONCE(!bio);
302 
303         if (bio_list_empty(&qn->bios)) {
304                 list_del_init(&qn->node);
305                 if (tg_to_put)
306                         *tg_to_put = qn->tg;
307                 else
308                         blkg_put(tg_to_blkg(qn->tg));
309         } else {
310                 list_move_tail(&qn->node, queued);
311         }
312 
313         return bio;
314 }
315 
316 /* init a service_queue, assumes the caller zeroed it */
317 static void throtl_service_queue_init(struct throtl_service_queue *sq)
318 {
319         INIT_LIST_HEAD(&sq->queued[0]);
320         INIT_LIST_HEAD(&sq->queued[1]);
321         sq->pending_tree = RB_ROOT;
322         setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
323                     (unsigned long)sq);
324 }
325 
326 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
327 {
328         struct throtl_grp *tg;
329         int rw;
330 
331         tg = kzalloc_node(sizeof(*tg), gfp, node);
332         if (!tg)
333                 return NULL;
334 
335         throtl_service_queue_init(&tg->service_queue);
336 
337         for (rw = READ; rw <= WRITE; rw++) {
338                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
339                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
340         }
341 
342         RB_CLEAR_NODE(&tg->rb_node);
343         tg->bps[READ] = -1;
344         tg->bps[WRITE] = -1;
345         tg->iops[READ] = -1;
346         tg->iops[WRITE] = -1;
347 
348         return &tg->pd;
349 }
350 
351 static void throtl_pd_init(struct blkg_policy_data *pd)
352 {
353         struct throtl_grp *tg = pd_to_tg(pd);
354         struct blkcg_gq *blkg = tg_to_blkg(tg);
355         struct throtl_data *td = blkg->q->td;
356         struct throtl_service_queue *sq = &tg->service_queue;
357 
358         /*
359          * If on the default hierarchy, we switch to properly hierarchical
360          * behavior where limits on a given throtl_grp are applied to the
361          * whole subtree rather than just the group itself.  e.g. If 16M
362          * read_bps limit is set on the root group, the whole system can't
363          * exceed 16M for the device.
364          *
365          * If not on the default hierarchy, the broken flat hierarchy
366          * behavior is retained where all throtl_grps are treated as if
367          * they're all separate root groups right below throtl_data.
368          * Limits of a group don't interact with limits of other groups
369          * regardless of the position of the group in the hierarchy.
370          */
371         sq->parent_sq = &td->service_queue;
372         if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
373                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
374         tg->td = td;
375 }
376 
377 /*
378  * Set has_rules[] if @tg or any of its parents have limits configured.
379  * This doesn't require walking up to the top of the hierarchy as the
380  * parent's has_rules[] is guaranteed to be correct.
381  */
382 static void tg_update_has_rules(struct throtl_grp *tg)
383 {
384         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
385         int rw;
386 
387         for (rw = READ; rw <= WRITE; rw++)
388                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
389                                     (tg->bps[rw] != -1 || tg->iops[rw] != -1);
390 }
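
/*
 * Editorial worked example (not part of the kernel source): with a
 * hierarchy root -> A -> B where only A has a read_bps limit set,
 * A->has_rules[READ] becomes true because A itself has a limit, and
 * B->has_rules[READ] becomes true because its parent does, even though
 * B has no limit of its own.  A group whose has_rules[rw] is false is
 * skipped entirely in blk_throtl_bio().
 */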
391 
392 static void throtl_pd_online(struct blkg_policy_data *pd)
393 {
394         /*
395          * We don't want new groups to escape the limits of their ancestors.
396          * Update has_rules[] after a new group is brought online.
397          */
398         tg_update_has_rules(pd_to_tg(pd));
399 }
400 
401 static void throtl_pd_free(struct blkg_policy_data *pd)
402 {
403         struct throtl_grp *tg = pd_to_tg(pd);
404 
405         del_timer_sync(&tg->service_queue.pending_timer);
406         kfree(tg);
407 }
408 
409 static struct throtl_grp *
410 throtl_rb_first(struct throtl_service_queue *parent_sq)
411 {
412         /* Service tree is empty */
413         if (!parent_sq->nr_pending)
414                 return NULL;
415 
416         if (!parent_sq->first_pending)
417                 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
418 
419         if (parent_sq->first_pending)
420                 return rb_entry_tg(parent_sq->first_pending);
421 
422         return NULL;
423 }
424 
425 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
426 {
427         rb_erase(n, root);
428         RB_CLEAR_NODE(n);
429 }
430 
431 static void throtl_rb_erase(struct rb_node *n,
432                             struct throtl_service_queue *parent_sq)
433 {
434         if (parent_sq->first_pending == n)
435                 parent_sq->first_pending = NULL;
436         rb_erase_init(n, &parent_sq->pending_tree);
437         --parent_sq->nr_pending;
438 }
439 
440 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
441 {
442         struct throtl_grp *tg;
443 
444         tg = throtl_rb_first(parent_sq);
445         if (!tg)
446                 return;
447 
448         parent_sq->first_pending_disptime = tg->disptime;
449 }
450 
451 static void tg_service_queue_add(struct throtl_grp *tg)
452 {
453         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
454         struct rb_node **node = &parent_sq->pending_tree.rb_node;
455         struct rb_node *parent = NULL;
456         struct throtl_grp *__tg;
457         unsigned long key = tg->disptime;
458         int left = 1;
459 
460         while (*node != NULL) {
461                 parent = *node;
462                 __tg = rb_entry_tg(parent);
463 
464                 if (time_before(key, __tg->disptime))
465                         node = &parent->rb_left;
466                 else {
467                         node = &parent->rb_right;
468                         left = 0;
469                 }
470         }
471 
472         if (left)
473                 parent_sq->first_pending = &tg->rb_node;
474 
475         rb_link_node(&tg->rb_node, parent, node);
476         rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
477 }
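
/*
 * Editorial worked example (not part of the kernel source): the pending
 * tree is keyed by disptime.  With groups at disptime 100 and 200
 * already pending, inserting a group with disptime 50 only ever walks
 * left, so "left" stays 1 and first_pending is updated to the new node;
 * inserting disptime 150 goes right at the node keyed 100, so the cached
 * leftmost node is untouched.  Equal keys go to the right, so groups
 * that become ready at the same time keep their insertion order.
 */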
478 
479 static void __throtl_enqueue_tg(struct throtl_grp *tg)
480 {
481         tg_service_queue_add(tg);
482         tg->flags |= THROTL_TG_PENDING;
483         tg->service_queue.parent_sq->nr_pending++;
484 }
485 
486 static void throtl_enqueue_tg(struct throtl_grp *tg)
487 {
488         if (!(tg->flags & THROTL_TG_PENDING))
489                 __throtl_enqueue_tg(tg);
490 }
491 
492 static void __throtl_dequeue_tg(struct throtl_grp *tg)
493 {
494         throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
495         tg->flags &= ~THROTL_TG_PENDING;
496 }
497 
498 static void throtl_dequeue_tg(struct throtl_grp *tg)
499 {
500         if (tg->flags & THROTL_TG_PENDING)
501                 __throtl_dequeue_tg(tg);
502 }
503 
504 /* Call with queue lock held */
505 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
506                                           unsigned long expires)
507 {
508         mod_timer(&sq->pending_timer, expires);
509         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
510                    expires - jiffies, jiffies);
511 }
512 
513 /**
514  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
515  * @sq: the service_queue to schedule dispatch for
516  * @force: force scheduling
517  *
518  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
519  * dispatch time of the first pending child.  Returns %true if either timer
520  * is armed or there's no pending child left.  %false if the current
521  * dispatch window is still open and the caller should continue
522  * dispatching.
523  *
524  * If @force is %true, the dispatch timer is always scheduled and this
525  * function is guaranteed to return %true.  This is to be used when the
526  * caller can't dispatch itself and needs to invoke pending_timer
527  * unconditionally.  Note that forced scheduling is likely to induce short
528  * delay before dispatch starts even if @sq->first_pending_disptime is not
529  * in the future and thus shouldn't be used in hot paths.
530  */
531 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
532                                           bool force)
533 {
534         /* any pending children left? */
535         if (!sq->nr_pending)
536                 return true;
537 
538         update_min_dispatch_time(sq);
539 
540         /* is the next dispatch time in the future? */
541         if (force || time_after(sq->first_pending_disptime, jiffies)) {
542                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
543                 return true;
544         }
545 
546         /* tell the caller to continue dispatching */
547         return false;
548 }
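
/*
 * Editorial usage note (not part of the kernel source): the dispatching
 * caller, throtl_pending_timer_fn() below, loops roughly as
 *
 *	while (true) {
 *		throtl_select_dispatch(sq);
 *		if (throtl_schedule_next_dispatch(sq, false))
 *			break;
 *		... drop the queue lock, cpu_relax(), retake it ...
 *	}
 *
 * while paths that cannot dispatch themselves, tg_conf_updated() and
 * blk_throtl_bio() when a group turns non-empty, pass force=true so the
 * timer is armed even if the dispatch time has already passed.
 */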
549 
550 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
551                 bool rw, unsigned long start)
552 {
553         tg->bytes_disp[rw] = 0;
554         tg->io_disp[rw] = 0;
555 
556         /*
557          * Previous slice has expired. We must have trimmed it after last
558          * bio dispatch. That means that since the start of the last slice, we
559          * never used that bandwidth. Do try to make use of it while giving
560          * credit.
561          */
562         if (time_after_eq(start, tg->slice_start[rw]))
563                 tg->slice_start[rw] = start;
564 
565         tg->slice_end[rw] = jiffies + throtl_slice;
566         throtl_log(&tg->service_queue,
567                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
568                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
569                    tg->slice_end[rw], jiffies);
570 }
571 
572 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
573 {
574         tg->bytes_disp[rw] = 0;
575         tg->io_disp[rw] = 0;
576         tg->slice_start[rw] = jiffies;
577         tg->slice_end[rw] = jiffies + throtl_slice;
578         throtl_log(&tg->service_queue,
579                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
580                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
581                    tg->slice_end[rw], jiffies);
582 }
583 
584 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
585                                         unsigned long jiffy_end)
586 {
587         tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
588 }
589 
590 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
591                                        unsigned long jiffy_end)
592 {
593         tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
594         throtl_log(&tg->service_queue,
595                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
596                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
597                    tg->slice_end[rw], jiffies);
598 }
599 
600 /* Determine if previously allocated or extended slice is complete or not */
601 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
602 {
603         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
604                 return false;
605 
606         return true;
607 }
608 
609 /* Trim the used slices and adjust slice start accordingly */
610 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
611 {
612         unsigned long nr_slices, time_elapsed, io_trim;
613         u64 bytes_trim, tmp;
614 
615         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
616 
617         /*
618          * If bps are unlimited (-1), then the time slice doesn't get
619          * renewed. Don't try to trim the slice if the slice is used up. A new
620          * slice will start when appropriate.
621          */
622         if (throtl_slice_used(tg, rw))
623                 return;
624 
625         /*
626          * A bio has been dispatched. Also adjust slice_end. It might happen
627          * that the cgroup limit was initially very low, resulting in a high
628          * slice_end, but the limit was later bumped up and the bio was
629          * dispatched sooner; then we need to reduce slice_end. A bogus, high
630          * slice_end is bad because it prevents a new slice from starting.
631          */
632 
633         throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
634 
635         time_elapsed = jiffies - tg->slice_start[rw];
636 
637         nr_slices = time_elapsed / throtl_slice;
638 
639         if (!nr_slices)
640                 return;
641         tmp = tg->bps[rw] * throtl_slice * nr_slices;
642         do_div(tmp, HZ);
643         bytes_trim = tmp;
644 
645         io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
646 
647         if (!bytes_trim && !io_trim)
648                 return;
649 
650         if (tg->bytes_disp[rw] >= bytes_trim)
651                 tg->bytes_disp[rw] -= bytes_trim;
652         else
653                 tg->bytes_disp[rw] = 0;
654 
655         if (tg->io_disp[rw] >= io_trim)
656                 tg->io_disp[rw] -= io_trim;
657         else
658                 tg->io_disp[rw] = 0;
659 
660         tg->slice_start[rw] += nr_slices * throtl_slice;
661 
662         throtl_log(&tg->service_queue,
663                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
664                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
665                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
666 }
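
/*
 * Editorial worked example (not part of the kernel source), assuming
 * HZ=1000 so throtl_slice is 100 jiffies: with bps=1048576 (1 MiB/s),
 * iops=100 and 250 jiffies elapsed since slice_start, nr_slices = 2,
 * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715 and
 * io_trim = 100 * 100 * 2 / 1000 = 20.  Those amounts are subtracted
 * from bytes_disp/io_disp (clamping at zero) and slice_start advances by
 * 2 * 100 = 200 jiffies, so consumption belonging to fully elapsed
 * slices stops counting against the current one.
 */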
667 
668 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
669                                   unsigned long *wait)
670 {
671         bool rw = bio_data_dir(bio);
672         unsigned int io_allowed;
673         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
674         u64 tmp;
675 
676         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
677 
678         /* Slice has just started. Consider one slice interval */
679         if (!jiffy_elapsed)
680                 jiffy_elapsed_rnd = throtl_slice;
681 
682         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
683 
684         /*
685          * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
686          * so at most the rounded elapsed time is the equivalent of 1 second,
687          * since we allow a dispatch after 1 second and by then the slice
688          * should have been trimmed.
689          */
690 
691         tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
692         do_div(tmp, HZ);
693 
694         if (tmp > UINT_MAX)
695                 io_allowed = UINT_MAX;
696         else
697                 io_allowed = tmp;
698 
699         if (tg->io_disp[rw] + 1 <= io_allowed) {
700                 if (wait)
701                         *wait = 0;
702                 return true;
703         }
704 
705         /* Calc approx time to dispatch */
706         jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
707 
708         if (jiffy_wait > jiffy_elapsed)
709                 jiffy_wait = jiffy_wait - jiffy_elapsed;
710         else
711                 jiffy_wait = 1;
712 
713         if (wait)
714                 *wait = jiffy_wait;
715         return false;
716 }
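
/*
 * Editorial worked example (not part of the kernel source), assuming
 * HZ=1000: with iops=100 and 30 jiffies elapsed in the slice,
 * jiffy_elapsed_rnd rounds up to 100, so io_allowed = 100 * 100 / 1000
 * = 10.  If io_disp is already 10, the 11th bio does not fit and the
 * estimated wait is (10 + 1) * 1000 / 100 + 1 = 111 jiffies, minus the
 * 30 already elapsed, i.e. *wait = 81.
 */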
717 
718 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
719                                  unsigned long *wait)
720 {
721         bool rw = bio_data_dir(bio);
722         u64 bytes_allowed, extra_bytes, tmp;
723         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
724 
725         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
726 
727         /* Slice has just started. Consider one slice interval */
728         if (!jiffy_elapsed)
729                 jiffy_elapsed_rnd = throtl_slice;
730 
731         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
732 
733         tmp = tg->bps[rw] * jiffy_elapsed_rnd;
734         do_div(tmp, HZ);
735         bytes_allowed = tmp;
736 
737         if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
738                 if (wait)
739                         *wait = 0;
740                 return true;
741         }
742 
743         /* Calc approx time to dispatch */
744         extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
745         jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
746 
747         if (!jiffy_wait)
748                 jiffy_wait = 1;
749 
750         /*
751          * This wait time is without taking into consideration the rounding
752          * up we did. Add that time also.
753          */
754         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
755         if (wait)
756                 *wait = jiffy_wait;
757         return false;
758 }
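
/*
 * Editorial worked example (not part of the kernel source), assuming
 * HZ=1000: with bps=1048576 (1 MiB/s) and 50 jiffies elapsed (rounded
 * up to 100), bytes_allowed = 1048576 * 100 / 1000 = 104857.  A 64 KiB
 * bio with bytes_disp already at 65536 would push the total to 131072,
 * so extra_bytes = 26215 and jiffy_wait = 26215 * 1000 / 1048576 = 25,
 * plus the 50 jiffies of round-up slack, giving *wait = 75.
 */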
759 
760 /*
761  * Returns whether one can dispatch a bio or not. Also returns the approximate
762  * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
763  */
764 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
765                             unsigned long *wait)
766 {
767         bool rw = bio_data_dir(bio);
768         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
769 
770         /*
771          * Currently whole state machine of group depends on first bio
772          * queued in the group bio list. So one should not be calling
773          * this function with a different bio if there are other bios
774          * queued.
775          */
776         BUG_ON(tg->service_queue.nr_queued[rw] &&
777                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
778 
779         /* If tg->bps = -1, then BW is unlimited */
780         if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
781                 if (wait)
782                         *wait = 0;
783                 return true;
784         }
785 
786         /*
787          * If the previous slice expired, start a new one; otherwise renew/extend
788          * the existing slice to make sure it is at least throtl_slice long
789          * from now.
790          */
791         if (throtl_slice_used(tg, rw))
792                 throtl_start_new_slice(tg, rw);
793         else {
794                 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
795                         throtl_extend_slice(tg, rw, jiffies + throtl_slice);
796         }
797 
798         if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
799             tg_with_in_iops_limit(tg, bio, &iops_wait)) {
800                 if (wait)
801                         *wait = 0;
802                 return true;
803         }
804 
805         max_wait = max(bps_wait, iops_wait);
806 
807         if (wait)
808                 *wait = max_wait;
809 
810         if (time_before(tg->slice_end[rw], jiffies + max_wait))
811                 throtl_extend_slice(tg, rw, jiffies + max_wait);
812 
813         return false;
814 }
815 
816 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
817 {
818         bool rw = bio_data_dir(bio);
819 
820         /* Charge the bio to the group */
821         tg->bytes_disp[rw] += bio->bi_iter.bi_size;
822         tg->io_disp[rw]++;
823 
824         /*
825          * REQ_THROTTLED is used to prevent the same bio from being throttled
826          * more than once as a throttled bio will go through blk-throtl the
827          * second time when it eventually gets issued.  Set it when a bio
828          * is being charged to a tg.
829          */
830         if (!(bio->bi_rw & REQ_THROTTLED))
831                 bio->bi_rw |= REQ_THROTTLED;
832 }
833 
834 /**
835  * throtl_add_bio_tg - add a bio to the specified throtl_grp
836  * @bio: bio to add
837  * @qn: qnode to use
838  * @tg: the target throtl_grp
839  *
840  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
841  * tg->qnode_on_self[] is used.
842  */
843 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
844                               struct throtl_grp *tg)
845 {
846         struct throtl_service_queue *sq = &tg->service_queue;
847         bool rw = bio_data_dir(bio);
848 
849         if (!qn)
850                 qn = &tg->qnode_on_self[rw];
851 
852         /*
853          * If @tg doesn't currently have any bios queued in the same
854          * direction, queueing @bio can change when @tg should be
855          * dispatched.  Mark that @tg was empty.  This is automatically
856          * cleared on the next tg_update_disptime().
857          */
858         if (!sq->nr_queued[rw])
859                 tg->flags |= THROTL_TG_WAS_EMPTY;
860 
861         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
862 
863         sq->nr_queued[rw]++;
864         throtl_enqueue_tg(tg);
865 }
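
/*
 * Editorial note (not part of the kernel source): THROTL_TG_WAS_EMPTY
 * tells the caller that this bio turned @tg from empty to non-empty in
 * this direction, so any previously computed @tg->disptime is stale.
 * Callers react by recomputing it via tg_update_disptime() below and
 * rescheduling, e.g. in blk_throtl_bio():
 *
 *	if (tg->flags & THROTL_TG_WAS_EMPTY) {
 *		tg_update_disptime(tg);
 *		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
 *	}
 */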
866 
867 static void tg_update_disptime(struct throtl_grp *tg)
868 {
869         struct throtl_service_queue *sq = &tg->service_queue;
870         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
871         struct bio *bio;
872 
873         if ((bio = throtl_peek_queued(&sq->queued[READ])))
874                 tg_may_dispatch(tg, bio, &read_wait);
875 
876         if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
877                 tg_may_dispatch(tg, bio, &write_wait);
878 
879         min_wait = min(read_wait, write_wait);
880         disptime = jiffies + min_wait;
881 
882         /* Update dispatch time */
883         throtl_dequeue_tg(tg);
884         tg->disptime = disptime;
885         throtl_enqueue_tg(tg);
886 
887         /* see throtl_add_bio_tg() */
888         tg->flags &= ~THROTL_TG_WAS_EMPTY;
889 }
890 
891 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
892                                         struct throtl_grp *parent_tg, bool rw)
893 {
894         if (throtl_slice_used(parent_tg, rw)) {
895                 throtl_start_new_slice_with_credit(parent_tg, rw,
896                                 child_tg->slice_start[rw]);
897         }
898 
899 }
900 
901 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
902 {
903         struct throtl_service_queue *sq = &tg->service_queue;
904         struct throtl_service_queue *parent_sq = sq->parent_sq;
905         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
906         struct throtl_grp *tg_to_put = NULL;
907         struct bio *bio;
908 
909         /*
910          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
911          * from @tg may put its reference and @parent_sq might end up
912          * getting released prematurely.  Remember the tg to put and put it
913          * after @bio is transferred to @parent_sq.
914          */
915         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
916         sq->nr_queued[rw]--;
917 
918         throtl_charge_bio(tg, bio);
919 
920         /*
921          * If our parent is another tg, we just need to transfer @bio to
922          * the parent using throtl_add_bio_tg().  If our parent is
923          * @td->service_queue, @bio is ready to be issued.  Put it on its
924          * queued[] list and decrease the total number queued.  The caller is
925          * responsible for issuing these bios.
926          */
927         if (parent_tg) {
928                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
929                 start_parent_slice_with_credit(tg, parent_tg, rw);
930         } else {
931                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
932                                      &parent_sq->queued[rw]);
933                 BUG_ON(tg->td->nr_queued[rw] <= 0);
934                 tg->td->nr_queued[rw]--;
935         }
936 
937         throtl_trim_slice(tg, rw);
938 
939         if (tg_to_put)
940                 blkg_put(tg_to_blkg(tg_to_put));
941 }
942 
943 static int throtl_dispatch_tg(struct throtl_grp *tg)
944 {
945         struct throtl_service_queue *sq = &tg->service_queue;
946         unsigned int nr_reads = 0, nr_writes = 0;
947         unsigned int max_nr_reads = throtl_grp_quantum*3/4;
948         unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
949         struct bio *bio;
950 
951         /* Try to dispatch 75% READS and 25% WRITES: 6 and 2 with the default quantum of 8 */
952 
953         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
954                tg_may_dispatch(tg, bio, NULL)) {
955 
956                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
957                 nr_reads++;
958 
959                 if (nr_reads >= max_nr_reads)
960                         break;
961         }
962 
963         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
964                tg_may_dispatch(tg, bio, NULL)) {
965 
966                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
967                 nr_writes++;
968 
969                 if (nr_writes >= max_nr_writes)
970                         break;
971         }
972 
973         return nr_reads + nr_writes;
974 }
975 
976 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
977 {
978         unsigned int nr_disp = 0;
979 
980         while (1) {
981                 struct throtl_grp *tg = throtl_rb_first(parent_sq);
982                 struct throtl_service_queue *sq;
983 
984                 if (!tg)
985                         break;
986                 sq = &tg->service_queue;
987                 if (time_before(jiffies, tg->disptime))
988                         break;
989 
990                 throtl_dequeue_tg(tg);
991 
992                 nr_disp += throtl_dispatch_tg(tg);
993 
994                 if (sq->nr_queued[0] || sq->nr_queued[1])
995                         tg_update_disptime(tg);
996 
997                 if (nr_disp >= throtl_quantum)
998                         break;
999         }
1000 
1001         return nr_disp;
1002 }
1003 
1004 /**
1005  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1006  * @arg: the throtl_service_queue being serviced
1007  *
1008  * This timer is armed when a child throtl_grp with active bios becomes
1009  * pending and queued on the service_queue's pending_tree and expires when
1010  * the first child throtl_grp should be dispatched.  This function
1011  * dispatches bio's from the children throtl_grps to the parent
1012  * service_queue.
1013  *
1014  * If the parent's parent is another throtl_grp, dispatching is propagated
1015  * by either arming its pending_timer or repeating dispatch directly.  If
1016  * the top-level service_tree is reached, throtl_data->dispatch_work is
1017  * kicked so that the ready bio's are issued.
1018  */
1019 static void throtl_pending_timer_fn(unsigned long arg)
1020 {
1021         struct throtl_service_queue *sq = (void *)arg;
1022         struct throtl_grp *tg = sq_to_tg(sq);
1023         struct throtl_data *td = sq_to_td(sq);
1024         struct request_queue *q = td->queue;
1025         struct throtl_service_queue *parent_sq;
1026         bool dispatched;
1027         int ret;
1028 
1029         spin_lock_irq(q->queue_lock);
1030 again:
1031         parent_sq = sq->parent_sq;
1032         dispatched = false;
1033 
1034         while (true) {
1035                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1036                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1037                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1038 
1039                 ret = throtl_select_dispatch(sq);
1040                 if (ret) {
1041                         throtl_log(sq, "bios disp=%u", ret);
1042                         dispatched = true;
1043                 }
1044 
1045                 if (throtl_schedule_next_dispatch(sq, false))
1046                         break;
1047 
1048                 /* this dispatch window is still open, relax and repeat */
1049                 spin_unlock_irq(q->queue_lock);
1050                 cpu_relax();
1051                 spin_lock_irq(q->queue_lock);
1052         }
1053 
1054         if (!dispatched)
1055                 goto out_unlock;
1056 
1057         if (parent_sq) {
1058                 /* @parent_sq is another throtl_grp, propagate dispatch */
1059                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1060                         tg_update_disptime(tg);
1061                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1062                                 /* window is already open, repeat dispatching */
1063                                 sq = parent_sq;
1064                                 tg = sq_to_tg(sq);
1065                                 goto again;
1066                         }
1067                 }
1068         } else {
1069                 /* reached the top-level, queue issuing */
1070                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1071         }
1072 out_unlock:
1073         spin_unlock_irq(q->queue_lock);
1074 }
1075 
1076 /**
1077  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1078  * @work: work item being executed
1079  *
1080  * This function is queued for execution when bios reach the queued[] lists
1081  * of throtl_data->service_queue.  Those bio's are ready and issued by this
1082  * function.
1083  */
1084 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1085 {
1086         struct throtl_data *td = container_of(work, struct throtl_data,
1087                                               dispatch_work);
1088         struct throtl_service_queue *td_sq = &td->service_queue;
1089         struct request_queue *q = td->queue;
1090         struct bio_list bio_list_on_stack;
1091         struct bio *bio;
1092         struct blk_plug plug;
1093         int rw;
1094 
1095         bio_list_init(&bio_list_on_stack);
1096 
1097         spin_lock_irq(q->queue_lock);
1098         for (rw = READ; rw <= WRITE; rw++)
1099                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1100                         bio_list_add(&bio_list_on_stack, bio);
1101         spin_unlock_irq(q->queue_lock);
1102 
1103         if (!bio_list_empty(&bio_list_on_stack)) {
1104                 blk_start_plug(&plug);
1105                 while ((bio = bio_list_pop(&bio_list_on_stack)))
1106                         generic_make_request(bio);
1107                 blk_finish_plug(&plug);
1108         }
1109 }
1110 
1111 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1112                               int off)
1113 {
1114         struct throtl_grp *tg = pd_to_tg(pd);
1115         u64 v = *(u64 *)((void *)tg + off);
1116 
1117         if (v == -1)
1118                 return 0;
1119         return __blkg_prfill_u64(sf, pd, v);
1120 }
1121 
1122 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1123                                int off)
1124 {
1125         struct throtl_grp *tg = pd_to_tg(pd);
1126         unsigned int v = *(unsigned int *)((void *)tg + off);
1127 
1128         if (v == -1)
1129                 return 0;
1130         return __blkg_prfill_u64(sf, pd, v);
1131 }
1132 
1133 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1134 {
1135         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1136                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1137         return 0;
1138 }
1139 
1140 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1141 {
1142         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1143                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1144         return 0;
1145 }
1146 
1147 static void tg_conf_updated(struct throtl_grp *tg)
1148 {
1149         struct throtl_service_queue *sq = &tg->service_queue;
1150         struct cgroup_subsys_state *pos_css;
1151         struct blkcg_gq *blkg;
1152 
1153         throtl_log(&tg->service_queue,
1154                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1155                    tg->bps[READ], tg->bps[WRITE],
1156                    tg->iops[READ], tg->iops[WRITE]);
1157 
1158         /*
1159          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1160          * considered to have rules if either the tg itself or any of its
1161          * ancestors has rules.  This identifies groups without any
1162          * restrictions in the whole hierarchy and allows them to bypass
1163          * blk-throttle.
1164          */
1165         blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
1166                 tg_update_has_rules(blkg_to_tg(blkg));
1167 
1168         /*
1169          * We're already holding queue_lock and know @tg is valid.  Let's
1170          * apply the new config directly.
1171          *
1172          * Restart the slices for both READ and WRITE. It might happen
1173          * that a group's limits are dropped suddenly and we don't want to
1174          * account recently dispatched IO at the new low rate.
1175          */
1176         throtl_start_new_slice(tg, 0);
1177         throtl_start_new_slice(tg, 1);
1178 
1179         if (tg->flags & THROTL_TG_PENDING) {
1180                 tg_update_disptime(tg);
1181                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1182         }
1183 }
1184 
1185 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1186                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1187 {
1188         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1189         struct blkg_conf_ctx ctx;
1190         struct throtl_grp *tg;
1191         int ret;
1192         u64 v;
1193 
1194         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1195         if (ret)
1196                 return ret;
1197 
1198         ret = -EINVAL;
1199         if (sscanf(ctx.body, "%llu", &v) != 1)
1200                 goto out_finish;
1201         if (!v)
1202                 v = -1;
1203 
1204         tg = blkg_to_tg(ctx.blkg);
1205 
1206         if (is_u64)
1207                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1208         else
1209                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1210 
1211         tg_conf_updated(tg);
1212         ret = 0;
1213 out_finish:
1214         blkg_conf_finish(&ctx);
1215         return ret ?: nbytes;
1216 }
1217 
1218 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1219                                char *buf, size_t nbytes, loff_t off)
1220 {
1221         return tg_set_conf(of, buf, nbytes, off, true);
1222 }
1223 
1224 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1225                                 char *buf, size_t nbytes, loff_t off)
1226 {
1227         return tg_set_conf(of, buf, nbytes, off, false);
1228 }
1229 
1230 static struct cftype throtl_legacy_files[] = {
1231         {
1232                 .name = "throttle.read_bps_device",
1233                 .private = offsetof(struct throtl_grp, bps[READ]),
1234                 .seq_show = tg_print_conf_u64,
1235                 .write = tg_set_conf_u64,
1236         },
1237         {
1238                 .name = "throttle.write_bps_device",
1239                 .private = offsetof(struct throtl_grp, bps[WRITE]),
1240                 .seq_show = tg_print_conf_u64,
1241                 .write = tg_set_conf_u64,
1242         },
1243         {
1244                 .name = "throttle.read_iops_device",
1245                 .private = offsetof(struct throtl_grp, iops[READ]),
1246                 .seq_show = tg_print_conf_uint,
1247                 .write = tg_set_conf_uint,
1248         },
1249         {
1250                 .name = "throttle.write_iops_device",
1251                 .private = offsetof(struct throtl_grp, iops[WRITE]),
1252                 .seq_show = tg_print_conf_uint,
1253                 .write = tg_set_conf_uint,
1254         },
1255         {
1256                 .name = "throttle.io_service_bytes",
1257                 .private = (unsigned long)&blkcg_policy_throtl,
1258                 .seq_show = blkg_print_stat_bytes,
1259         },
1260         {
1261                 .name = "throttle.io_serviced",
1262                 .private = (unsigned long)&blkcg_policy_throtl,
1263                 .seq_show = blkg_print_stat_ios,
1264         },
1265         { }     /* terminate */
1266 };
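
/*
 * Editorial usage example (not part of the kernel source): on the legacy
 * (cgroup v1) hierarchy these files appear under the blkio controller
 * and take "major:minor value" pairs, e.g.
 *
 *	# echo "8:16 1048576" > blkio.throttle.read_bps_device
 *	# echo "8:16 120" > blkio.throttle.write_iops_device
 *
 * Writing 0 removes the limit (tg_set_conf() maps 0 to -1, i.e.
 * unlimited).  The device numbers here are purely illustrative.
 */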
1267 
1268 static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
1269                          int off)
1270 {
1271         struct throtl_grp *tg = pd_to_tg(pd);
1272         const char *dname = blkg_dev_name(pd->blkg);
1273         char bufs[4][21] = { "max", "max", "max", "max" };
1274 
1275         if (!dname)
1276                 return 0;
1277         if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
1278             tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
1279                 return 0;
1280 
1281         if (tg->bps[READ] != -1)
1282                 snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
1283         if (tg->bps[WRITE] != -1)
1284                 snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
1285         if (tg->iops[READ] != -1)
1286                 snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
1287         if (tg->iops[WRITE] != -1)
1288                 snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);
1289 
1290         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
1291                    dname, bufs[0], bufs[1], bufs[2], bufs[3]);
1292         return 0;
1293 }
1294 
1295 static int tg_print_max(struct seq_file *sf, void *v)
1296 {
1297         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
1298                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1299         return 0;
1300 }
1301 
1302 static ssize_t tg_set_max(struct kernfs_open_file *of,
1303                           char *buf, size_t nbytes, loff_t off)
1304 {
1305         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1306         struct blkg_conf_ctx ctx;
1307         struct throtl_grp *tg;
1308         u64 v[4];
1309         int ret;
1310 
1311         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1312         if (ret)
1313                 return ret;
1314 
1315         tg = blkg_to_tg(ctx.blkg);
1316 
1317         v[0] = tg->bps[READ];
1318         v[1] = tg->bps[WRITE];
1319         v[2] = tg->iops[READ];
1320         v[3] = tg->iops[WRITE];
1321 
1322         while (true) {
1323                 char tok[27];   /* wiops=18446744073709551616 */
1324                 char *p;
1325                 u64 val = -1;
1326                 int len;
1327 
1328                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1329                         break;
1330                 if (tok[0] == '\0')
1331                         break;
1332                 ctx.body += len;
1333 
1334                 ret = -EINVAL;
1335                 p = tok;
1336                 strsep(&p, "=");
1337                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1338                         goto out_finish;
1339 
1340                 ret = -ERANGE;
1341                 if (!val)
1342                         goto out_finish;
1343 
1344                 ret = -EINVAL;
1345                 if (!strcmp(tok, "rbps"))
1346                         v[0] = val;
1347                 else if (!strcmp(tok, "wbps"))
1348                         v[1] = val;
1349                 else if (!strcmp(tok, "riops"))
1350                         v[2] = min_t(u64, val, UINT_MAX);
1351                 else if (!strcmp(tok, "wiops"))
1352                         v[3] = min_t(u64, val, UINT_MAX);
1353                 else
1354                         goto out_finish;
1355         }
1356 
1357         tg->bps[READ] = v[0];
1358         tg->bps[WRITE] = v[1];
1359         tg->iops[READ] = v[2];
1360         tg->iops[WRITE] = v[3];
1361 
1362         tg_conf_updated(tg);
1363         ret = 0;
1364 out_finish:
1365         blkg_conf_finish(&ctx);
1366         return ret ?: nbytes;
1367 }
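
/*
 * Editorial usage example (not part of the kernel source): on the
 * default (cgroup v2) hierarchy the single "max" file defined just below
 * accepts any subset of the four keys parsed above, with "max" meaning
 * unlimited, e.g.
 *
 *	# echo "8:16 rbps=1048576 wiops=120" > io.max
 *	# echo "8:16 rbps=max" > io.max
 *
 * Keys that are not mentioned keep their current value, since v[] is
 * initialised from the group's existing limits before parsing, and a
 * value of 0 is rejected with -ERANGE.  The "io.max" name assumes the
 * controller is exposed as "io", as on recent kernels; device numbers
 * are illustrative.
 */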
1368 
1369 static struct cftype throtl_files[] = {
1370         {
1371                 .name = "max",
1372                 .flags = CFTYPE_NOT_ON_ROOT,
1373                 .seq_show = tg_print_max,
1374                 .write = tg_set_max,
1375         },
1376         { }     /* terminate */
1377 };
1378 
1379 static void throtl_shutdown_wq(struct request_queue *q)
1380 {
1381         struct throtl_data *td = q->td;
1382 
1383         cancel_work_sync(&td->dispatch_work);
1384 }
1385 
1386 static struct blkcg_policy blkcg_policy_throtl = {
1387         .dfl_cftypes            = throtl_files,
1388         .legacy_cftypes         = throtl_legacy_files,
1389 
1390         .pd_alloc_fn            = throtl_pd_alloc,
1391         .pd_init_fn             = throtl_pd_init,
1392         .pd_online_fn           = throtl_pd_online,
1393         .pd_free_fn             = throtl_pd_free,
1394 };
1395 
1396 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
1397                     struct bio *bio)
1398 {
1399         struct throtl_qnode *qn = NULL;
1400         struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
1401         struct throtl_service_queue *sq;
1402         bool rw = bio_data_dir(bio);
1403         bool throttled = false;
1404 
1405         WARN_ON_ONCE(!rcu_read_lock_held());
1406 
1407         /* see throtl_charge_bio() */
1408         if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
1409                 goto out;
1410 
1411         spin_lock_irq(q->queue_lock);
1412 
1413         if (unlikely(blk_queue_bypass(q)))
1414                 goto out_unlock;
1415 
1416         sq = &tg->service_queue;
1417 
1418         while (true) {
1419                 /* throtl is FIFO - if bios are already queued, should queue */
1420                 if (sq->nr_queued[rw])
1421                         break;
1422 
1423                 /* if above limits, break to queue */
1424                 if (!tg_may_dispatch(tg, bio, NULL))
1425                         break;
1426 
1427                 /* within limits, let's charge and dispatch directly */
1428                 throtl_charge_bio(tg, bio);
1429 
1430                 /*
1431                  * We need to trim slice even when bios are not being queued
1432                  * otherwise it might happen that a bio is not queued for
1433                  * a long time and slice keeps on extending and trim is not
1434                  * called for a long time. Now if limits are reduced suddenly
1435                  * we take into account all the IO dispatched so far at the new
1436                  * low rate and newly queued IO gets a really long dispatch
1437                  * time.
1438                  *
1439                  * So keep on trimming slice even if bio is not queued.
1440                  */
1441                 throtl_trim_slice(tg, rw);
1442 
1443                 /*
1444                  * @bio passed through this layer without being throttled.
1445                  * Climb up the ladder.  If we're already at the top, it
1446                  * can be executed directly.
1447                  */
1448                 qn = &tg->qnode_on_parent[rw];
1449                 sq = sq->parent_sq;
1450                 tg = sq_to_tg(sq);
1451                 if (!tg)
1452                         goto out_unlock;
1453         }
1454 
1455         /* out-of-limit, queue to @tg */
1456         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1457                    rw == READ ? 'R' : 'W',
1458                    tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
1459                    tg->io_disp[rw], tg->iops[rw],
1460                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
1461 
1462         bio_associate_current(bio);
1463         tg->td->nr_queued[rw]++;
1464         throtl_add_bio_tg(bio, qn, tg);
1465         throttled = true;
1466 
1467         /*
1468          * Update @tg's dispatch time and force schedule dispatch if @tg
1469          * was empty before @bio.  The forced scheduling isn't likely to
1470          * cause undue delay as @bio is likely to be dispatched directly if
1471          * its @tg's disptime is not in the future.
1472          */
1473         if (tg->flags & THROTL_TG_WAS_EMPTY) {
1474                 tg_update_disptime(tg);
1475                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1476         }
1477 
1478 out_unlock:
1479         spin_unlock_irq(q->queue_lock);
1480 out:
1481         /*
1482          * As multiple blk-throtls may stack in the same issue path, we
1483          * don't want bios to leave with the flag set.  Clear the flag if
1484          * being issued.
1485          */
1486         if (!throttled)
1487                 bio->bi_rw &= ~REQ_THROTTLED;
1488         return throttled;
1489 }
1490 
1491 /*
1492  * Dispatch all bios from all children tg's queued on @parent_sq.  On
1493  * return, @parent_sq is guaranteed to not have any active children tg's
1494  * and all bios from previously active tg's are on @parent_sq->queued[].
1495  */
1496 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1497 {
1498         struct throtl_grp *tg;
1499 
1500         while ((tg = throtl_rb_first(parent_sq))) {
1501                 struct throtl_service_queue *sq = &tg->service_queue;
1502                 struct bio *bio;
1503 
1504                 throtl_dequeue_tg(tg);
1505 
1506                 while ((bio = throtl_peek_queued(&sq->queued[READ])))
1507                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
1508                 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
1509                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
1510         }
1511 }
1512 
1513 /**
1514  * blk_throtl_drain - drain throttled bios
1515  * @q: request_queue to drain throttled bios for
1516  *
1517  * Dispatch all currently throttled bios on @q through ->make_request_fn().
1518  */
1519 void blk_throtl_drain(struct request_queue *q)
1520         __releases(q->queue_lock) __acquires(q->queue_lock)
1521 {
1522         struct throtl_data *td = q->td;
1523         struct blkcg_gq *blkg;
1524         struct cgroup_subsys_state *pos_css;
1525         struct bio *bio;
1526         int rw;
1527 
1528         queue_lockdep_assert_held(q);
1529         rcu_read_lock();
1530 
1531         /*
1532          * Drain each tg while doing post-order walk on the blkg tree, so
1533          * that all bios are propagated to td->service_queue.  It'd be
1534          * better to walk service_queue tree directly but blkg walk is
1535          * easier.
1536          */
1537         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1538                 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
1539 
1540         /* finally, transfer bios from top-level tg's into the td */
1541         tg_drain_bios(&td->service_queue);
1542 
1543         rcu_read_unlock();
1544         spin_unlock_irq(q->queue_lock);
1545 
1546         /* all bios now should be in td->service_queue, issue them */
1547         for (rw = READ; rw <= WRITE; rw++)
1548                 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1549                                                 NULL)))
1550                         generic_make_request(bio);
1551 
1552         spin_lock_irq(q->queue_lock);
1553 }
1554 
1555 int blk_throtl_init(struct request_queue *q)
1556 {
1557         struct throtl_data *td;
1558         int ret;
1559 
1560         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1561         if (!td)
1562                 return -ENOMEM;
1563 
1564         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1565         throtl_service_queue_init(&td->service_queue);
1566 
1567         q->td = td;
1568         td->queue = q;
1569 
1570         /* activate policy */
1571         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1572         if (ret)
1573                 kfree(td);
1574         return ret;
1575 }
1576 
1577 void blk_throtl_exit(struct request_queue *q)
1578 {
1579         BUG_ON(!q->td);
1580         throtl_shutdown_wq(q);
1581         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1582         kfree(q->td);
1583 }
1584 
1585 static int __init throtl_init(void)
1586 {
1587         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1588         if (!kthrotld_workqueue)
1589                 panic("Failed to create kthrotld\n");
1590 
1591         return blkcg_policy_register(&blkcg_policy_throtl);
1592 }
1593 
1594 module_init(throtl_init);
1595 
