Linux/block/blk-iolatency.c

  1 /*
  2  * Block rq-qos base io controller
  3  *
  4  * This works similar to wbt with a few exceptions
  5  *
  6  * - It's bio based, so the latency covers the whole block layer in addition to
  7  *   the actual io.
  8  * - We will throttle all IO that comes in here if we need to.
  9  * - We use the mean latency over the 100ms window.  This is because writes can
 10  *   be particularly fast, which could give us a false sense of the impact of
 11  *   other workloads on our protected workload.
 12  * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 13  *   that we can have as many outstanding bio's as we're allowed to.  Only at
 14  *   throttle time do we pay attention to the actual queue depth.
 15  *
 16  * The hierarchy works like the cpu controller does: we track the latency at
 17  * every configured node, and each configured node has its own independent
 18  * queue depth.  This means that we only care about our latency targets at the
 19  * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 20  * a group at the end of some other path if we're only configured at leaf level.
 21  *
 22  * Consider the following
 23  *
 24  *                   root blkg
 25  *             /                     \
 26  *        fast (target=5ms)     slow (target=10ms)
 27  *         /     \                  /        \
 28  *       a        b          normal(15ms)   unloved
 29  *
 30  * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 31  * an average latency of 5ms.  If it does then we will throttle the "slow"
 32  * group.  In the case of "normal", if it exceeds its 15ms target, we will
 33  * throttle "unloved", but nobody else.
 34  *
 35  * In this example "fast", "slow", and "normal" will be the only groups actually
 36  * accounting their io latencies.  We have to walk up the hierarchy to the root
 37  * on every submit and complete so we can do the appropriate stat recording and
 38  * adjust our own queue depth if needed.
 39  *
 40  * There are 2 ways we throttle IO.
 41  *
 42  * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 43  * number of IO's we're allowed to have in flight.  This starts at (u64)-1 down
 44  * to 1.  If the group is only ever submitting IO for itself then this is the
 45  * only way we throttle.
 46  *
 47  * 2) Induced delay throttling.  This is for the case that a group is generating
 48  * IO that has to be issued by the root cg to avoid priority inversion. So think
 49  * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 50  * of work done for us on behalf of the root cg and are being asked to scale
 51  * down more, then we induce a latency at userspace return.  We accumulate the
 52  * total amount of time we need to be punished by doing
 53  *
 54  * total_time += min_lat_nsec - actual_io_completion
 55  *
 56  * and then at throttle time will do
 57  *
 58  * throttle_time = min(total_time, NSEC_PER_SEC)
 59  *
 60  * This induced delay will throttle back the activity that is generating the
 61  * root cg issued io's, whether that's some metadata intensive operation or the
 62  * group is using so much memory that it is pushing us into swap.
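     *
     * As an illustration (numbers made up): with min_lat_nsec = 10ms, an IO
     * that the root cg completed in 2ms adds 8ms to total_time; at throttle
     * time the task is delayed by min(total_time, NSEC_PER_SEC), i.e. never
     * more than one second per throttle point.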
 63  *
 64  * Copyright (C) 2018 Josef Bacik
 65  */
 66 #include <linux/kernel.h>
 67 #include <linux/blk_types.h>
 68 #include <linux/backing-dev.h>
 69 #include <linux/module.h>
 70 #include <linux/timer.h>
 71 #include <linux/memcontrol.h>
 72 #include <linux/sched/loadavg.h>
 73 #include <linux/sched/signal.h>
 74 #include <trace/events/block.h>
 75 #include <linux/blk-mq.h>
 76 #include "blk-rq-qos.h"
 77 #include "blk-stat.h"
 78 
 79 #define DEFAULT_SCALE_COOKIE 1000000U
 80 
 81 static struct blkcg_policy blkcg_policy_iolatency;
 82 struct iolatency_grp;
 83 
 84 struct blk_iolatency {
 85         struct rq_qos rqos;
 86         struct timer_list timer;
 87         atomic_t enabled;
 88 };
 89 
 90 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
 91 {
 92         return container_of(rqos, struct blk_iolatency, rqos);
 93 }
 94 
 95 static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
 96 {
 97         return atomic_read(&blkiolat->enabled) > 0;
 98 }
 99 
100 struct child_latency_info {
101         spinlock_t lock;
102 
103         /* Last time we adjusted the scale of everybody. */
104         u64 last_scale_event;
105 
106         /* The latency that we missed. */
107         u64 scale_lat;
108 
109         /* Total io's from all of our children for the last summation. */
110         u64 nr_samples;
111 
112         /* The guy who actually changed the latency numbers. */
113         struct iolatency_grp *scale_grp;
114 
115         /* Cookie to tell if we need to scale up or down. */
116         atomic_t scale_cookie;
117 };
118 
119 struct percentile_stats {
120         u64 total;
121         u64 missed;
122 };
123 
124 struct latency_stat {
125         union {
126                 struct percentile_stats ps;
127                 struct blk_rq_stat rqs;
128         };
129 };
130 
131 struct iolatency_grp {
132         struct blkg_policy_data pd;
133         struct latency_stat __percpu *stats;
134         struct latency_stat cur_stat;
135         struct blk_iolatency *blkiolat;
136         struct rq_depth rq_depth;
137         struct rq_wait rq_wait;
138         atomic64_t window_start;
139         atomic_t scale_cookie;
140         u64 min_lat_nsec;
141         u64 cur_win_nsec;
142 
143         /* total running average of our io latency. */
144         u64 lat_avg;
145 
146         /* Our current number of IO's for the last summation. */
147         u64 nr_samples;
148 
149         bool ssd;
150         struct child_latency_info child_lat;
151 };
152 
153 #define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
154 #define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
155 /*
156  * These are the constants used to fake the fixed-point moving average
157  * calculation just like load average.  The call to calc_load() folds
158  * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
159  * window size is bucketed to try to approximately calculate average
160  * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
161  * elapse immediately.  Note, windows only elapse with IO activity.  Idle
162  * periods extend the most recent window.
163  */
164 #define BLKIOLATENCY_NR_EXP_FACTORS 5
165 #define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
166                                       (BLKIOLATENCY_NR_EXP_FACTORS - 1))
167 static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
168         2045, // exp(1/600) - 600 samples
169         2039, // exp(1/240) - 240 samples
170         2031, // exp(1/120) - 120 samples
171         2023, // exp(1/80)  - 80 samples
172         2014, // exp(1/60)  - 60 samples
173 };
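    /*
     * iolat_update_total_lat_avg() below picks the bucket as
     * cur_win_nsec / BLKIOLATENCY_EXP_BUCKET_SIZE (250ms): the default 100ms
     * window maps to bucket 0 (factor 2045, slowest decay) and a full 1s
     * window maps to bucket 4 (factor 2014, fastest decay).
     */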
174 
175 static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
176 {
177         return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
178 }
179 
180 static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
181 {
182         return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
183 }
184 
185 static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
186 {
187         return pd_to_blkg(&iolat->pd);
188 }
189 
190 static inline void latency_stat_init(struct iolatency_grp *iolat,
191                                      struct latency_stat *stat)
192 {
193         if (iolat->ssd) {
194                 stat->ps.total = 0;
195                 stat->ps.missed = 0;
196         } else
197                 blk_rq_stat_init(&stat->rqs);
198 }
199 
200 static inline void latency_stat_sum(struct iolatency_grp *iolat,
201                                     struct latency_stat *sum,
202                                     struct latency_stat *stat)
203 {
204         if (iolat->ssd) {
205                 sum->ps.total += stat->ps.total;
206                 sum->ps.missed += stat->ps.missed;
207         } else
208                 blk_rq_stat_sum(&sum->rqs, &stat->rqs);
209 }
210 
211 static inline void latency_stat_record_time(struct iolatency_grp *iolat,
212                                             u64 req_time)
213 {
214         struct latency_stat *stat = get_cpu_ptr(iolat->stats);
215         if (iolat->ssd) {
216                 if (req_time >= iolat->min_lat_nsec)
217                         stat->ps.missed++;
218                 stat->ps.total++;
219         } else
220                 blk_rq_stat_add(&stat->rqs, req_time);
221         put_cpu_ptr(stat);
222 }
223 
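    /*
     * A window is considered healthy on ssd if the number of IOs that missed
     * the target stayed below 10% of the total (threshold of at least 1), and
     * on rotational storage if the mean latency stayed within the target.
     */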
224 static inline bool latency_sum_ok(struct iolatency_grp *iolat,
225                                   struct latency_stat *stat)
226 {
227         if (iolat->ssd) {
228                 u64 thresh = div64_u64(stat->ps.total, 10);
229                 thresh = max(thresh, 1ULL);
230                 return stat->ps.missed < thresh;
231         }
232         return stat->rqs.mean <= iolat->min_lat_nsec;
233 }
234 
235 static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
236                                        struct latency_stat *stat)
237 {
238         if (iolat->ssd)
239                 return stat->ps.total;
240         return stat->rqs.nr_samples;
241 }
242 
243 static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
244                                               struct latency_stat *stat)
245 {
246         int exp_idx;
247 
248         if (iolat->ssd)
249                 return;
250 
251         /*
252          * calc_load() takes in a number stored in fixed point representation.
253          * Because we are using this for IO time in ns, the values stored
254          * are significantly larger than the FIXED_1 denominator (2048).
255          * Therefore, rounding errors in the calculation are negligible and
256          * can be ignored.
257          */
258         exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
259                         div64_u64(iolat->cur_win_nsec,
260                                   BLKIOLATENCY_EXP_BUCKET_SIZE));
261         iolat->lat_avg = calc_load(iolat->lat_avg,
262                                    iolatency_exp_factors[exp_idx],
263                                    stat->rqs.mean);
264 }
265 
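    /*
     * Take an inflight slot if we are below max_depth.  On the first pass,
     * refuse to jump ahead of waiters already queued on rqw->wait.
     */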
266 static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
267                                        wait_queue_entry_t *wait,
268                                        bool first_block)
269 {
270         struct rq_wait *rqw = &iolat->rq_wait;
271 
272         if (first_block && waitqueue_active(&rqw->wait) &&
273             rqw->wait.head.next != &wait->entry)
274                 return false;
275         return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
276 }
277 
278 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
279                                        struct iolatency_grp *iolat,
280                                        spinlock_t *lock, bool issue_as_root,
281                                        bool use_memdelay)
282         __releases(lock)
283         __acquires(lock)
284 {
285         struct rq_wait *rqw = &iolat->rq_wait;
286         unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
287         DEFINE_WAIT(wait);
288         bool first_block = true;
289 
290         if (use_delay)
291                 blkcg_schedule_throttle(rqos->q, use_memdelay);
292 
293         /*
294          * To avoid priority inversions we want to just take a slot if we are
295          * issuing as root.  If we're being killed off there's no point in
296          * delaying things, we may have been killed by OOM so throttling may
297          * make recovery take even longer, so just let the IO's through so the
298          * task can go away.
299          */
300         if (issue_as_root || fatal_signal_pending(current)) {
301                 atomic_inc(&rqw->inflight);
302                 return;
303         }
304 
305         if (iolatency_may_queue(iolat, &wait, first_block))
306                 return;
307 
308         do {
309                 prepare_to_wait_exclusive(&rqw->wait, &wait,
310                                           TASK_UNINTERRUPTIBLE);
311 
312                 if (iolatency_may_queue(iolat, &wait, first_block))
313                         break;
314                 first_block = false;
315 
316                 if (lock) {
317                         spin_unlock_irq(lock);
318                         io_schedule();
319                         spin_lock_irq(lock);
320                 } else {
321                         io_schedule();
322                 }
323         } while (1);
324 
325         finish_wait(&rqw->wait, &wait);
326 }
327 
328 #define SCALE_DOWN_FACTOR 2
329 #define SCALE_UP_FACTOR 4
330 
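    /*
     * Step size for scaling: 1/16th of nr_requests when scaling up, 1/4th
     * when scaling down, never less than 1.
     */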
331 static inline unsigned long scale_amount(unsigned long qd, bool up)
332 {
333         return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
334 }
335 
336 /*
337  * We scale the qd down faster than we scale up, so we need to use this helper
338  * to adjust the scale_cookie accordingly so we don't prematurely get
339  * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
340  *
341  * Each group has its own local copy of the last scale cookie it saw, so if
342  * the global scale cookie goes up or down it knows which way it needs to go
343  * based on its last knowledge of it.
344  */
345 static void scale_cookie_change(struct blk_iolatency *blkiolat,
346                                 struct child_latency_info *lat_info,
347                                 bool up)
348 {
349         unsigned long qd = blkiolat->rqos.q->nr_requests;
350         unsigned long scale = scale_amount(qd, up);
351         unsigned long old = atomic_read(&lat_info->scale_cookie);
352         unsigned long max_scale = qd << 1;
353         unsigned long diff = 0;
354 
355         if (old < DEFAULT_SCALE_COOKIE)
356                 diff = DEFAULT_SCALE_COOKIE - old;
357 
358         if (up) {
359                 if (scale + old > DEFAULT_SCALE_COOKIE)
360                         atomic_set(&lat_info->scale_cookie,
361                                    DEFAULT_SCALE_COOKIE);
362                 else if (diff > qd)
363                         atomic_inc(&lat_info->scale_cookie);
364                 else
365                         atomic_add(scale, &lat_info->scale_cookie);
366         } else {
367                 /*
368                  * We don't want to dig a hole so deep that it takes us hours to
369                  * dig out of it.  Just enough that we don't throttle/unthrottle
370                  * with jagged workloads but can still unthrottle once pressure
371                  * has sufficiently dissipated.
372                  */
373                 if (diff > qd) {
374                         if (diff < max_scale)
375                                 atomic_dec(&lat_info->scale_cookie);
376                 } else {
377                         atomic_sub(scale, &lat_info->scale_cookie);
378                 }
379         }
380 }
381 
382 /*
383  * Change the queue depth of the iolatency_grp.  We add 1/16th of the
384  * queue depth when scaling up and halve it when scaling down, to avoid
385  * wild swings and to dial in to a fairer share of the overall queue depth.
386  */
387 static void scale_change(struct iolatency_grp *iolat, bool up)
388 {
389         unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
390         unsigned long scale = scale_amount(qd, up);
391         unsigned long old = iolat->rq_depth.max_depth;
392 
393         if (old > qd)
394                 old = qd;
395 
396         if (up) {
397                 if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
398                         return;
399 
400                 if (old < qd) {
401                         old += scale;
402                         old = min(old, qd);
403                         iolat->rq_depth.max_depth = old;
404                         wake_up_all(&iolat->rq_wait.wait);
405                 }
406         } else {
407                 old >>= 1;
408                 iolat->rq_depth.max_depth = max(old, 1UL);
409         }
410 }
411 
412 /* Check our parent and see if the scale cookie has changed. */
413 static void check_scale_change(struct iolatency_grp *iolat)
414 {
415         struct iolatency_grp *parent;
416         struct child_latency_info *lat_info;
417         unsigned int cur_cookie;
418         unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
419         u64 scale_lat;
420         unsigned int old;
421         int direction = 0;
422 
423         if (lat_to_blkg(iolat)->parent == NULL)
424                 return;
425 
426         parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
427         if (!parent)
428                 return;
429 
430         lat_info = &parent->child_lat;
431         cur_cookie = atomic_read(&lat_info->scale_cookie);
432         scale_lat = READ_ONCE(lat_info->scale_lat);
433 
434         if (cur_cookie < our_cookie)
435                 direction = -1;
436         else if (cur_cookie > our_cookie)
437                 direction = 1;
438         else
439                 return;
440 
441         old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);
442 
443         /* Somebody beat us to the punch, just bail. */
444         if (old != our_cookie)
445                 return;
446 
447         if (direction < 0 && iolat->min_lat_nsec) {
448                 u64 samples_thresh;
449 
450                 if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
451                         return;
452 
453                 /*
454                  * Sometimes high priority groups are their own worst enemy, so
455                  * instead of taking it out on some poor other group that did 5%
456          * or less of the IO's for the last summation, just skip this
457                  * scale down event.
458                  */
459                 samples_thresh = lat_info->nr_samples * 5;
460                 samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
461                 if (iolat->nr_samples <= samples_thresh)
462                         return;
463         }
464 
465         /* We're as low as we can go. */
466         if (iolat->rq_depth.max_depth == 1 && direction < 0) {
467                 blkcg_use_delay(lat_to_blkg(iolat));
468                 return;
469         }
470 
471         /* We're back to the default cookie, unthrottle all the things. */
472         if (cur_cookie == DEFAULT_SCALE_COOKIE) {
473                 blkcg_clear_delay(lat_to_blkg(iolat));
474                 iolat->rq_depth.max_depth = UINT_MAX;
475                 wake_up_all(&iolat->rq_wait.wait);
476                 return;
477         }
478 
479         scale_change(iolat, direction > 0);
480 }
481 
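    /*
     * rq_qos throttle hook: associate the bio with a blkg, then walk from that
     * blkg up to the root, applying each configured level's queue depth limit
     * (and scheduling any induced delay) before the bio may proceed.
     */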
482 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
483                                      spinlock_t *lock)
484 {
485         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
486         struct blkcg *blkcg;
487         struct blkcg_gq *blkg;
488         struct request_queue *q = rqos->q;
489         bool issue_as_root = bio_issue_as_root_blkg(bio);
490 
491         if (!blk_iolatency_enabled(blkiolat))
492                 return;
493 
494         rcu_read_lock();
495         blkcg = bio_blkcg(bio);
496         bio_associate_blkcg(bio, &blkcg->css);
497         blkg = blkg_lookup(blkcg, q);
498         if (unlikely(!blkg)) {
499                 if (!lock)
500                         spin_lock_irq(q->queue_lock);
501                 blkg = blkg_lookup_create(blkcg, q);
502                 if (IS_ERR(blkg))
503                         blkg = NULL;
504                 if (!lock)
505                         spin_unlock_irq(q->queue_lock);
506         }
507         if (!blkg)
508                 goto out;
509 
510         bio_issue_init(&bio->bi_issue, bio_sectors(bio));
511         bio_associate_blkg(bio, blkg);
512 out:
513         rcu_read_unlock();
514         while (blkg && blkg->parent) {
515                 struct iolatency_grp *iolat = blkg_to_lat(blkg);
516                 if (!iolat) {
517                         blkg = blkg->parent;
518                         continue;
519                 }
520 
521                 check_scale_change(iolat);
522                 __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
523                                      (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
524                 blkg = blkg->parent;
525         }
526         if (!timer_pending(&blkiolat->timer))
527                 mod_timer(&blkiolat->timer, jiffies + HZ);
528 }
529 
530 static void iolatency_record_time(struct iolatency_grp *iolat,
531                                   struct bio_issue *issue, u64 now,
532                                   bool issue_as_root)
533 {
534         u64 start = bio_issue_time(issue);
535         u64 req_time;
536 
537         /*
538          * Truncate "now" the same way the issue time was truncated so the
539          * two timestamps are directly comparable.
540          */
541         now = __bio_issue_time(now);
542 
543         if (now <= start)
544                 return;
545 
546         req_time = now - start;
547 
548         /*
549          * We don't want to count issue_as_root bio's in the cgroup's latency
550          * statistics as it could skew the numbers downwards.
551          */
552         if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
553                 u64 sub = iolat->min_lat_nsec;
554                 if (req_time < sub)
555                         blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
556                 return;
557         }
558 
559         latency_stat_record_time(iolat, req_time);
560 }
561 
562 #define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
563 #define BLKIOLATENCY_MIN_GOOD_SAMPLES 5
564 
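    /*
     * Called when a latency window closes: fold the per-cpu stats, update the
     * running average, and, when warranted, move the parent's scale cookie up
     * (target met) or down (target missed).
     */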
565 static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
566 {
567         struct blkcg_gq *blkg = lat_to_blkg(iolat);
568         struct iolatency_grp *parent;
569         struct child_latency_info *lat_info;
570         struct latency_stat stat;
571         unsigned long flags;
572         int cpu;
573 
574         latency_stat_init(iolat, &stat);
575         preempt_disable();
576         for_each_online_cpu(cpu) {
577                 struct latency_stat *s;
578                 s = per_cpu_ptr(iolat->stats, cpu);
579                 latency_stat_sum(iolat, &stat, s);
580                 latency_stat_init(iolat, s);
581         }
582         preempt_enable();
583 
584         parent = blkg_to_lat(blkg->parent);
585         if (!parent)
586                 return;
587 
588         lat_info = &parent->child_lat;
589 
590         iolat_update_total_lat_avg(iolat, &stat);
591 
592         /* Everything is ok and we don't need to adjust the scale. */
593         if (latency_sum_ok(iolat, &stat) &&
594             atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
595                 return;
596 
597         /* Somebody beat us to the punch, just bail. */
598         spin_lock_irqsave(&lat_info->lock, flags);
599 
600         latency_stat_sum(iolat, &iolat->cur_stat, &stat);
601         lat_info->nr_samples -= iolat->nr_samples;
602         lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
603         iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
604 
605         if ((lat_info->last_scale_event >= now ||
606             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
607                 goto out;
608 
609         if (latency_sum_ok(iolat, &iolat->cur_stat) &&
610             latency_sum_ok(iolat, &stat)) {
611                 if (latency_stat_samples(iolat, &iolat->cur_stat) <
612                     BLKIOLATENCY_MIN_GOOD_SAMPLES)
613                         goto out;
614                 if (lat_info->scale_grp == iolat) {
615                         lat_info->last_scale_event = now;
616                         scale_cookie_change(iolat->blkiolat, lat_info, true);
617                 }
618         } else if (lat_info->scale_lat == 0 ||
619                    lat_info->scale_lat >= iolat->min_lat_nsec) {
620                 lat_info->last_scale_event = now;
621                 if (!lat_info->scale_grp ||
622                     lat_info->scale_lat > iolat->min_lat_nsec) {
623                         WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
624                         lat_info->scale_grp = iolat;
625                 }
626                 scale_cookie_change(iolat->blkiolat, lat_info, false);
627         }
628         latency_stat_init(iolat, &iolat->cur_stat);
629 out:
630         spin_unlock_irqrestore(&lat_info->lock, flags);
631 }
632 
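    /*
     * Completion hook: walk up the hierarchy releasing the inflight slots
     * taken at throttle time, record the bio's latency, and close the current
     * window once cur_win_nsec has elapsed.
     */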
633 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
634 {
635         struct blkcg_gq *blkg;
636         struct rq_wait *rqw;
637         struct iolatency_grp *iolat;
638         u64 window_start;
639         u64 now = ktime_to_ns(ktime_get());
640         bool issue_as_root = bio_issue_as_root_blkg(bio);
641         bool enabled = false;
642 
643         blkg = bio->bi_blkg;
644         if (!blkg)
645                 return;
646 
647         iolat = blkg_to_lat(bio->bi_blkg);
648         if (!iolat)
649                 return;
650 
651         enabled = blk_iolatency_enabled(iolat->blkiolat);
652         if (!enabled)
653                 return;
654 
655         while (blkg && blkg->parent) {
656                 iolat = blkg_to_lat(blkg);
657                 if (!iolat) {
658                         blkg = blkg->parent;
659                         continue;
660                 }
661                 rqw = &iolat->rq_wait;
662 
663                 atomic_dec(&rqw->inflight);
664                 if (iolat->min_lat_nsec == 0)
665                         goto next;
666                 iolatency_record_time(iolat, &bio->bi_issue, now,
667                                       issue_as_root);
668                 window_start = atomic64_read(&iolat->window_start);
669                 if (now > window_start &&
670                     (now - window_start) >= iolat->cur_win_nsec) {
671                         if (atomic64_cmpxchg(&iolat->window_start,
672                                         window_start, now) == window_start)
673                                 iolatency_check_latencies(iolat, now);
674                 }
675 next:
676                 wake_up(&rqw->wait);
677                 blkg = blkg->parent;
678         }
679 }
680 
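    /*
     * Undo the throttle accounting for a bio that will never reach
     * blkcg_iolatency_done_bio(): give back the inflight slots and wake any
     * waiters.
     */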
681 static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
682 {
683         struct blkcg_gq *blkg;
684 
685         blkg = bio->bi_blkg;
686         while (blkg && blkg->parent) {
687                 struct rq_wait *rqw;
688                 struct iolatency_grp *iolat;
689 
690                 iolat = blkg_to_lat(blkg);
691                 if (!iolat)
692                         goto next;
693 
694                 rqw = &iolat->rq_wait;
695                 atomic_dec(&rqw->inflight);
696                 wake_up(&rqw->wait);
697 next:
698                 blkg = blkg->parent;
699         }
700 }
701 
702 static void blkcg_iolatency_exit(struct rq_qos *rqos)
703 {
704         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
705 
706         del_timer_sync(&blkiolat->timer);
707         blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
708         kfree(blkiolat);
709 }
710 
711 static struct rq_qos_ops blkcg_iolatency_ops = {
712         .throttle = blkcg_iolatency_throttle,
713         .cleanup = blkcg_iolatency_cleanup,
714         .done_bio = blkcg_iolatency_done_bio,
715         .exit = blkcg_iolatency_exit,
716 };
717 
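    /*
     * Runs roughly once per second while IO is flowing.  For any group still
     * scaled down, nudge the scale cookie back up if no scale_grp remains, and
     * forget a scale_grp that hasn't caused a scale event for 5 seconds.
     */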
718 static void blkiolatency_timer_fn(struct timer_list *t)
719 {
720         struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
721         struct blkcg_gq *blkg;
722         struct cgroup_subsys_state *pos_css;
723         u64 now = ktime_to_ns(ktime_get());
724 
725         rcu_read_lock();
726         blkg_for_each_descendant_pre(blkg, pos_css,
727                                      blkiolat->rqos.q->root_blkg) {
728                 struct iolatency_grp *iolat;
729                 struct child_latency_info *lat_info;
730                 unsigned long flags;
731                 u64 cookie;
732 
733                 /*
734                  * We could be exiting, don't access the pd unless we have a
735                  * ref on the blkg.
736                  */
737                 if (!blkg_try_get(blkg))
738                         continue;
739 
740                 iolat = blkg_to_lat(blkg);
741                 if (!iolat)
742                         goto next;
743 
744                 lat_info = &iolat->child_lat;
745                 cookie = atomic_read(&lat_info->scale_cookie);
746 
747                 if (cookie >= DEFAULT_SCALE_COOKIE)
748                         goto next;
749 
750                 spin_lock_irqsave(&lat_info->lock, flags);
751                 if (lat_info->last_scale_event >= now)
752                         goto next_lock;
753 
754                 /*
755                  * We scaled down but don't have a scale_grp, scale up and carry
756                  * on.
757                  */
758                 if (lat_info->scale_grp == NULL) {
759                         scale_cookie_change(iolat->blkiolat, lat_info, true);
760                         goto next_lock;
761                 }
762 
763                 /*
764                  * It's been 5 seconds since our last scale event, clear the
765                  * scale grp in case the group that needed the scale down isn't
766                  * doing any IO currently.
767                  */
768                 if (now - lat_info->last_scale_event >=
769                     ((u64)NSEC_PER_SEC * 5))
770                         lat_info->scale_grp = NULL;
771 next_lock:
772                 spin_unlock_irqrestore(&lat_info->lock, flags);
773 next:
774                 blkg_put(blkg);
775         }
776         rcu_read_unlock();
777 }
778 
779 int blk_iolatency_init(struct request_queue *q)
780 {
781         struct blk_iolatency *blkiolat;
782         struct rq_qos *rqos;
783         int ret;
784 
785         blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
786         if (!blkiolat)
787                 return -ENOMEM;
788 
789         rqos = &blkiolat->rqos;
790         rqos->id = RQ_QOS_CGROUP;
791         rqos->ops = &blkcg_iolatency_ops;
792         rqos->q = q;
793 
794         rq_qos_add(q, rqos);
795 
796         ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
797         if (ret) {
798                 rq_qos_del(q, rqos);
799                 kfree(blkiolat);
800                 return ret;
801         }
802 
803         timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
804 
805         return 0;
806 }
807 
808 /*
809  * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
810  * return 0.
811  */
812 static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
813 {
814         struct iolatency_grp *iolat = blkg_to_lat(blkg);
815         u64 oldval = iolat->min_lat_nsec;
816 
817         iolat->min_lat_nsec = val;
818         iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
819         iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
820                                     BLKIOLATENCY_MAX_WIN_SIZE);
821 
822         if (!oldval && val)
823                 return 1;
824         if (oldval && !val)
825                 return -1;
826         return 0;
827 }
828 
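    /*
     * Reset the parent's scaling state (cookie, scale_grp, scale_lat) so a
     * changed target starts from a clean slate.
     */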
829 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
830 {
831         if (blkg->parent) {
832                 struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
833                 struct child_latency_info *lat_info;
834                 if (!iolat)
835                         return;
836 
837                 lat_info = &iolat->child_lat;
838                 spin_lock(&lat_info->lock);
839                 atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
840                 lat_info->last_scale_event = 0;
841                 lat_info->scale_grp = NULL;
842                 lat_info->scale_lat = 0;
843                 spin_unlock(&lat_info->lock);
844         }
845 }
846 
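    /*
     * Write handler for the io.latency cgroup file.  The input is parsed as
     * "MAJ:MIN target=<usecs>" (or "target=max" to clear the target), e.g.
     * "8:0 target=2000" for a 2ms target on device 8:0.  The global enable
     * count is adjusted under a queue freeze when the target toggles between
     * zero and non-zero.
     */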
847 static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
848                              size_t nbytes, loff_t off)
849 {
850         struct blkcg *blkcg = css_to_blkcg(of_css(of));
851         struct blkcg_gq *blkg;
852         struct blkg_conf_ctx ctx;
853         struct iolatency_grp *iolat;
854         char *p, *tok;
855         u64 lat_val = 0;
856         u64 oldval;
857         int ret;
858         int enable = 0;
859 
860         ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
861         if (ret)
862                 return ret;
863 
864         iolat = blkg_to_lat(ctx.blkg);
865         p = ctx.body;
866 
867         ret = -EINVAL;
868         while ((tok = strsep(&p, " "))) {
869                 char key[16];
870                 char val[21];   /* 18446744073709551616 */
871 
872                 if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
873                         goto out;
874 
875                 if (!strcmp(key, "target")) {
876                         u64 v;
877 
878                         if (!strcmp(val, "max"))
879                                 lat_val = 0;
880                         else if (sscanf(val, "%llu", &v) == 1)
881                                 lat_val = v * NSEC_PER_USEC;
882                         else
883                                 goto out;
884                 } else {
885                         goto out;
886                 }
887         }
888 
889         /* Apply the new target and reset scaling if it changed. */
890         blkg = ctx.blkg;
891         oldval = iolat->min_lat_nsec;
892 
893         enable = iolatency_set_min_lat_nsec(blkg, lat_val);
894         if (enable) {
895                 WARN_ON_ONCE(!blk_get_queue(blkg->q));
896                 blkg_get(blkg);
897         }
898 
899         if (oldval != iolat->min_lat_nsec) {
900                 iolatency_clear_scaling(blkg);
901         }
902 
903         ret = 0;
904 out:
905         blkg_conf_finish(&ctx);
906         if (ret == 0 && enable) {
907                 struct iolatency_grp *tmp = blkg_to_lat(blkg);
908                 struct blk_iolatency *blkiolat = tmp->blkiolat;
909 
910                 blk_mq_freeze_queue(blkg->q);
911 
912                 if (enable == 1)
913                         atomic_inc(&blkiolat->enabled);
914                 else if (enable == -1)
915                         atomic_dec(&blkiolat->enabled);
916                 else
917                         WARN_ON_ONCE(1);
918 
919                 blk_mq_unfreeze_queue(blkg->q);
920 
921                 blkg_put(blkg);
922                 blk_put_queue(blkg->q);
923         }
924         return ret ?: nbytes;
925 }
926 
927 static u64 iolatency_prfill_limit(struct seq_file *sf,
928                                   struct blkg_policy_data *pd, int off)
929 {
930         struct iolatency_grp *iolat = pd_to_lat(pd);
931         const char *dname = blkg_dev_name(pd->blkg);
932 
933         if (!dname || !iolat->min_lat_nsec)
934                 return 0;
935         seq_printf(sf, "%s target=%llu\n",
936                    dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
937         return 0;
938 }
939 
940 static int iolatency_print_limit(struct seq_file *sf, void *v)
941 {
942         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
943                           iolatency_prfill_limit,
944                           &blkcg_policy_iolatency, seq_cft(sf)->private, false);
945         return 0;
946 }
947 
948 static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
949                                  size_t size)
950 {
951         struct latency_stat stat;
952         int cpu;
953 
954         latency_stat_init(iolat, &stat);
955         preempt_disable();
956         for_each_online_cpu(cpu) {
957                 struct latency_stat *s;
958                 s = per_cpu_ptr(iolat->stats, cpu);
959                 latency_stat_sum(iolat, &stat, s);
960         }
961         preempt_enable();
962 
963         if (iolat->rq_depth.max_depth == UINT_MAX)
964                 return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
965                                  (unsigned long long)stat.ps.missed,
966                                  (unsigned long long)stat.ps.total);
967         return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
968                          (unsigned long long)stat.ps.missed,
969                          (unsigned long long)stat.ps.total,
970                          iolat->rq_depth.max_depth);
971 }
972 
973 static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
974                                 size_t size)
975 {
976         struct iolatency_grp *iolat = pd_to_lat(pd);
977         unsigned long long avg_lat;
978         unsigned long long cur_win;
979 
980         if (iolat->ssd)
981                 return iolatency_ssd_stat(iolat, buf, size);
982 
983         avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
984         cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
985         if (iolat->rq_depth.max_depth == UINT_MAX)
986                 return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
987                                  avg_lat, cur_win);
988 
989         return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
990                          iolat->rq_depth.max_depth, avg_lat, cur_win);
991 }
992 
993 
994 static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
995 {
996         struct iolatency_grp *iolat;
997 
998         iolat = kzalloc_node(sizeof(*iolat), gfp, node);
999         if (!iolat)
1000                 return NULL;
1001         iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
1002                                        __alignof__(struct latency_stat), gfp);
1003         if (!iolat->stats) {
1004                 kfree(iolat);
1005                 return NULL;
1006         }
1007         return &iolat->pd;
1008 }
1009 
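    /*
     * Per-blkg init: use percentile accounting on non-rotational queues and
     * mean-latency accounting otherwise, reset the per-cpu stats, and start
     * from the parent's current scale cookie so a new group begins at the
     * current scaling level.
     */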
1010 static void iolatency_pd_init(struct blkg_policy_data *pd)
1011 {
1012         struct iolatency_grp *iolat = pd_to_lat(pd);
1013         struct blkcg_gq *blkg = lat_to_blkg(iolat);
1014         struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
1015         struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
1016         u64 now = ktime_to_ns(ktime_get());
1017         int cpu;
1018 
1019         if (blk_queue_nonrot(blkg->q))
1020                 iolat->ssd = true;
1021         else
1022                 iolat->ssd = false;
1023 
1024         for_each_possible_cpu(cpu) {
1025                 struct latency_stat *stat;
1026                 stat = per_cpu_ptr(iolat->stats, cpu);
1027                 latency_stat_init(iolat, stat);
1028         }
1029 
1030         latency_stat_init(iolat, &iolat->cur_stat);
1031         rq_wait_init(&iolat->rq_wait);
1032         spin_lock_init(&iolat->child_lat.lock);
1033         iolat->rq_depth.queue_depth = blkg->q->nr_requests;
1034         iolat->rq_depth.max_depth = UINT_MAX;
1035         iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
1036         iolat->blkiolat = blkiolat;
1037         iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
1038         atomic64_set(&iolat->window_start, now);
1039 
1040         /*
1041          * We init things in list order, so the pd for the parent may not be
1042          * init'ed yet for whatever reason.
1043          */
1044         if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
1045                 struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
1046                 atomic_set(&iolat->scale_cookie,
1047                            atomic_read(&parent->child_lat.scale_cookie));
1048         } else {
1049                 atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
1050         }
1051 
1052         atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
1053 }
1054 
1055 static void iolatency_pd_offline(struct blkg_policy_data *pd)
1056 {
1057         struct iolatency_grp *iolat = pd_to_lat(pd);
1058         struct blkcg_gq *blkg = lat_to_blkg(iolat);
1059         struct blk_iolatency *blkiolat = iolat->blkiolat;
1060         int ret;
1061 
1062         ret = iolatency_set_min_lat_nsec(blkg, 0);
1063         if (ret == 1)
1064                 atomic_inc(&blkiolat->enabled);
1065         if (ret == -1)
1066                 atomic_dec(&blkiolat->enabled);
1067         iolatency_clear_scaling(blkg);
1068 }
1069 
1070 static void iolatency_pd_free(struct blkg_policy_data *pd)
1071 {
1072         struct iolatency_grp *iolat = pd_to_lat(pd);
1073         free_percpu(iolat->stats);
1074         kfree(iolat);
1075 }
1076 
1077 static struct cftype iolatency_files[] = {
1078         {
1079                 .name = "latency",
1080                 .flags = CFTYPE_NOT_ON_ROOT,
1081                 .seq_show = iolatency_print_limit,
1082                 .write = iolatency_set_limit,
1083         },
1084         {}
1085 };
1086 
1087 static struct blkcg_policy blkcg_policy_iolatency = {
1088         .dfl_cftypes    = iolatency_files,
1089         .pd_alloc_fn    = iolatency_pd_alloc,
1090         .pd_init_fn     = iolatency_pd_init,
1091         .pd_offline_fn  = iolatency_pd_offline,
1092         .pd_free_fn     = iolatency_pd_free,
1093         .pd_stat_fn     = iolatency_pd_stat,
1094 };
1095 
1096 static int __init iolatency_init(void)
1097 {
1098         return blkcg_policy_register(&blkcg_policy_iolatency);
1099 }
1100 
1101 static void __exit iolatency_exit(void)
1102 {
1103         return blkcg_policy_unregister(&blkcg_policy_iolatency);
1104 }
1105 
1106 module_init(iolatency_init);
1107 module_exit(iolatency_exit);
1108 
