Linux/block/blk-mq-tag.c

/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

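/*
 * A NULL tag map means allocation is not limited by tags, so report
 * free capacity; otherwise check the normal (non-reserved) bitmap for
 * a clear bit.
 */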
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all tasks potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->sb.depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

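/*
 * Illustrative numbers (hypothetical, not from this file): with
 * bt->sb.depth == 128 and 5 active queues, hctx_may_queue() computes
 * depth = max((128 + 5 - 1) / 5, 4U) = max(26, 4) = 26, so each active
 * shared-tag user may keep at most 26 requests in flight before it is
 * told to back off. The 4U floor guarantees a minimum share when many
 * users crowd a small tag map.
 */
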
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
            !hctx_may_queue(data->hctx, bt))
                return -1;
        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

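/*
 * Allocate a tag for the request described by @data. If no tag is
 * immediately available and BLK_MQ_REQ_NOWAIT is not set, run the
 * hardware queue and sleep on the sbitmap waitqueue until a tag frees
 * up, re-mapping the hctx after each wakeup. Returns the tag plus the
 * reserved-tag offset, or BLK_MQ_TAG_FAIL.
 */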
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        bool drop_ctx;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_TAG_FAIL;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != -1)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_TAG_FAIL;

        ws = bt_wait_ptr(bt, data->hctx);
        drop_ctx = data->ctx == NULL;
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != -1)
                        break;

                if (data->ctx)
                        blk_mq_put_ctx(data->ctx);

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                                data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If the destination hw queue changed, issue a fake wakeup
                 * on the previous queue to compensate for the missed wakeup,
                 * so that other allocations on the previous queue won't be
                 * starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        if (drop_ctx && data->ctx)
                blk_mq_put_ctx(data->ctx);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        return tag + tag_offset;
}

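/*
 * Release a tag back to the bitmap it was allocated from: reserved tags
 * map directly into breserved_tags, while normal tags are stored in
 * bitmap_tags at an offset of tags->nr_reserved_tags.
 */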
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
                    struct blk_mq_ctx *ctx, unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        rq = tags->rqs[bitnr];

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (rq && rq->q == hctx->queue)
                return iter_data->fn(hctx, rq, iter_data->data, reserved);
        return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *              where rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as third argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = tags->rqs[bitnr];
        if (rq && blk_mq_request_started(rq))
                return iter_data->fn(rq, iter_data->data, reserved);

        return true;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data,
 *              @reserved) where rq is a pointer to a request. Return true
 *              to continue iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

/**
 * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv,
 *              reserved) where rq is a pointer to a request. 'reserved'
 *              indicates whether or not @rq is a reserved request. Return
 *              true to continue iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
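
/*
 * Illustrative use of blk_mq_tagset_busy_iter() (hypothetical driver
 * code, not part of this file): count all started requests in a tag
 * set, e.g. from a timeout or error-recovery path.
 *
 *      static bool my_count_started(struct request *rq, void *data,
 *                                   bool reserved)
 *      {
 *              unsigned int *count = data;
 *
 *              (*count)++;
 *              return true;    // true == keep iterating
 *      }
 *
 *      unsigned int count = 0;
 *
 *      blk_mq_tagset_busy_iter(&dev->tag_set, my_count_started, &count);
 */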

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *              reserved) where rq is a pointer to a request and hctx points
 *              to the hardware queue associated with the request. 'reserved'
 *              indicates whether or not @rq is a reserved request.
 * @priv:       Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it. __blk_mq_update_nr_hw_queues() uses
         * synchronize_rcu() to ensure this function left the critical section
         * below.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}

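/*
 * Thin wrapper around sbitmap_queue_init_node(); passing -1 as the
 * shift lets the sbitmap code pick a default word granularity for
 * @depth.
 */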
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
                goto free_tags;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
                     node))
                goto free_bitmap_tags;

        return tags;
free_bitmap_tags:
        sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

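/*
 * Illustrative use of blk_mq_init_tags() (hypothetical values, not from
 * this file): a driver exposing a queue depth of 256 with one reserved
 * tag on NUMA node 0 could do:
 *
 *      struct blk_mq_tags *tags;
 *
 *      tags = blk_mq_init_tags(256, 1, 0, BLK_TAG_ALLOC_FIFO);
 *      if (!tags)
 *              return -ENOMEM;
 */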
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}

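/*
 * Adjust the number of tags in @tagsptr. Shrinking only resizes the
 * normal bitmap; growing past the original depth requires @can_grow and
 * replaces the request map entirely. Reserved tags are never resized.
 * Returns 0 on success or a negative errno.
 */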
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                tags->nr_reserved_tags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr);
                *tagsptr = new;
        } else {
                /*
                 * We don't need to (and can't) update reserved tags here;
                 * they remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
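
/*
 * Worked example (hypothetical values): with BLK_MQ_UNIQUE_TAG_BITS == 16,
 * a request holding tag 5 on hardware queue 2 yields a unique tag of
 * (2 << 16) | 5 == 0x20005. blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() recover the two halves.
 */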