TOMOYO Linux Cross Reference
Linux/block/blk-mq-tag.c

/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

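/*
 * Scan each per-cacheline bitmap word for a zero bit; true if any tag
 * in the map is still free.
 */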
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}

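/*
 * Advance a wait queue index, wrapping at BT_WAIT_QUEUES (which must
 * be a power of 2 for the mask to work).
 */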
static inline int bt_index_inc(int index)
{
        return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
        int old = atomic_read(index);
        int new = bt_index_inc(old);
        atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;

        /*
         * Make sure all changes prior to this are visible from other CPUs.
         */
        smp_mb();
        bt = &tags->bitmap_tags;
        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);

                wake_index = bt_index_inc(wake_index);
        }

        if (include_reserve) {
                bt = &tags->breserved_tags;
                if (waitqueue_active(&bt->bs[0].wait))
                        wake_up(&bt->bs[0].wait);
        }
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_bitmap_tags *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant: a depth of 1 can't be split up.
         */
        if (bt->depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->depth + users - 1) / users, 4U);
        return atomic_read(&hctx->nr_active) < depth;
}

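/*
 * Claim a free bit in a single bitmap word, starting the search at
 * 'last_tag'. Returns the bit claimed, or -1 if the word is exhausted.
 * Unless 'nowrap' is set, a failed search that began at an offset is
 * restarted once from bit 0.
 */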
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
                         bool nowrap)
{
        int tag, org_last_tag = last_tag;

        while (1) {
                tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
                if (unlikely(tag >= bm->depth)) {
                        /*
                         * We started with an offset, and we didn't reset the
                         * offset to 0 in a failure case, so start from 0 to
                         * exhaust the map.
                         */
                        if (org_last_tag && last_tag && !nowrap) {
                                last_tag = org_last_tag = 0;
                                continue;
                        }
                        return -1;
                }

                if (!test_and_set_bit(tag, &bm->word))
                        break;

                last_tag = tag + 1;
                if (last_tag >= bm->depth - 1)
                        last_tag = 0;
        }

        return tag;
}

#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                    unsigned int *tag_cache, struct blk_mq_tags *tags)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
                                    BT_ALLOC_RR(tags));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
                        goto done;
                }

                /*
                 * Jump to next index, and reset the last tag to be the
                 * first tag of that index
                 */
                index++;
                last_tag = (index << bt->bits_per_word);

                if (index >= bt->map_nr) {
                        index = 0;
                        last_tag = 0;
                }
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}

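/*
 * Pick the wait queue to sleep on. Reserved tag allocations have no
 * hardware context and always use bs[0]; everyone else round-robins
 * across the map's wait queues via the per-hctx wait_index.
 */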
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;
        int wait_index;

        if (!hctx)
                return &bt->bs[0];

        wait_index = atomic_read(&hctx->wait_index);
        bs = &bt->bs[wait_index];
        bt_index_atomic_inc(&hctx->wait_index);
        return bs;
}

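/*
 * Slow path of tag allocation: retry __bt_get() under prepare_to_wait(),
 * kicking the hardware queue between attempts, and io_schedule() until a
 * tag frees up. We may wake on a different CPU, so the ctx/hctx mapping
 * is re-established after every sleep.
 */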
static int bt_get(struct blk_mq_alloc_data *data,
                struct blk_mq_bitmap_tags *bt,
                struct blk_mq_hw_ctx *hctx,
                unsigned int *last_tag, struct blk_mq_tags *tags)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag, tags);
        if (tag != -1)
                return tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

                tag = __bt_get(hctx, bt, last_tag, tags);
                if (tag != -1)
                        break;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete. Note that hctx can be NULL here for
                 * reserved tag allocation.
                 */
                if (hctx)
                        blk_mq_run_hw_queue(hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __bt_get(hctx, bt, last_tag, tags);
                if (tag != -1)
                        break;

                blk_mq_put_ctx(data->ctx);

                io_schedule();

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = data->q->mq_ops->map_queue(data->q,
                                data->ctx->cpu);
                if (data->flags & BLK_MQ_REQ_RESERVED) {
                        bt = &data->hctx->tags->breserved_tags;
                } else {
                        last_tag = &data->ctx->last_tag;
                        hctx = data->hctx;
                        bt = &hctx->tags->bitmap_tags;
                }
                finish_wait(&bs->wait, &wait);
                bs = bt_wait_ptr(bt, hctx);
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}

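/*
 * Allocate a tag from the regular map. Tags handed back to the caller
 * are offset past the reserved range, so a successful allocation is
 * always >= nr_reserved_tags.
 */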
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        int tag;

        tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
                        &data->ctx->last_tag, data->hctx->tags);
        if (tag >= 0)
                return tag + data->hctx->tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
        int tag, zero = 0;

        if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
                data->hctx->tags);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_RESERVED)
                return __blk_mq_get_reserved_tag(data);
        return __blk_mq_get_tag(data);
}

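/*
 * Find the first wait queue with sleepers, scanning from wake_index,
 * and store the position we settled on so later frees continue from
 * the same queue.
 */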
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        int o = atomic_read(&bt->wake_index);
                        if (wake_index != o)
                                atomic_cmpxchg(&bt->wake_index, o, wake_index);

                        return bs;
                }

                wake_index = bt_index_inc(wake_index);
        }

        return NULL;
}

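/*
 * Mark a tag free again and, once enough frees have accumulated
 * (wait_cnt hits zero), wake the next batch of sleepers and advance
 * wake_index. Batching keeps a single completion from waking every
 * sleeper at once.
 */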
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;
        int wait_cnt;

        clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        /* Ensure that the wait list checks occur after clear_bit(). */
        smp_mb();

        bs = bt_wake_ptr(bt);
        if (!bs)
                return;

        wait_cnt = atomic_dec_return(&bs->wait_cnt);
        if (unlikely(wait_cnt < 0))
                wait_cnt = atomic_inc_return(&bs->wait_cnt);
        if (wait_cnt == 0) {
                atomic_add(bt->wake_cnt, &bs->wait_cnt);
                bt_index_atomic_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}

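/*
 * Release a tag. Tags >= nr_reserved_tags are translated back into an
 * index into the regular map; anything below that belongs to the
 * reserved map.
 */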
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                bt_clear_tag(&tags->bitmap_tags, real_tag);
                if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
                        *last_tag = real_tag;
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                bt_clear_tag(&tags->breserved_tags, tag);
        }
}

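/*
 * Iterate over all busy (set) bits in a bitmap and invoke the callback
 * on the requests behind them. 'off' converts a bit position within
 * the map into an index into the rqs[] array.
 */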
static void bt_for_each(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_bitmap_tags *bt, unsigned int off,
                busy_iter_fn *fn, void *data, bool reserved)
{
        struct request *rq;
        int bit, i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                for (bit = find_first_bit(&bm->word, bm->depth);
                     bit < bm->depth;
                     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
                        rq = hctx->tags->rqs[off + bit];
                        if (rq->q == hctx->queue)
                                fn(hctx, rq, data, reserved);
                }

                off += (1 << bt->bits_per_word);
        }
}

static void bt_tags_for_each(struct blk_mq_tags *tags,
                struct blk_mq_bitmap_tags *bt, unsigned int off,
                busy_tag_iter_fn *fn, void *data, bool reserved)
{
        struct request *rq;
        int bit, i;

        if (!tags->rqs)
                return;
        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                for (bit = find_first_bit(&bm->word, bm->depth);
                     bit < bm->depth;
                     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
                        rq = tags->rqs[off + bit];
                        fn(rq, data, reserved);
                }

                off += (1 << bt->bits_per_word);
        }
}

void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
{
        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
        bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                        false);
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                      false);
        }
}

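/*
 * Count the free tags left: the map depth minus the number of set
 * bits across all bitmap words.
 */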
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}

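/*
 * Distribute 'depth' tags over the per-cacheline bitmap words and
 * recompute the wakeup batch size, keeping it small enough that every
 * wait queue still sees wakeups at low depths.
 */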
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                            unsigned int depth)
{
        unsigned int tags_per_word = 1U << bt->bits_per_word;
        unsigned int map_depth = depth;

        if (depth) {
                int i;

                for (i = 0; i < bt->map_nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= bt->map[i].depth;
                }
        }

        bt->wake_cnt = BT_WAIT_BATCH;
        if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
                bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);

        bt->depth = depth;
}

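/*
 * Allocate the bitmap words and wait queues for one tag map. For small
 * depths, the number of bits per word is shrunk so the tags still
 * spread across several cachelines.
 */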
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                        int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * If less than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }

                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
                                                GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                bt->map = NULL;
                return -ENOMEM;
        }

        bt_update_count(bt, depth);

        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                init_waitqueue_head(&bt->bs[i].wait);
                atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
        }

        return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        tags->alloc_policy = alloc_policy;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        /* also drop the cpumask our caller allocated, or it leaks */
        free_cpumask_var(tags->cpumask);
        kfree(tags);
        return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
                kfree(tags);
                return NULL;
        }

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        free_cpumask_var(tags->cpumask);
        kfree(tags);
}

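/*
 * Seed the per-context last_tag cache with a random offset into the
 * map, so new contexts don't all start allocating from the first word.
 */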
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
        tdepth -= tags->nr_reserved_tags;
        if (tdepth > tags->nr_tags)
                return -EINVAL;

        /*
         * We don't need to (and can't) update reserved tags here; they
         * remain static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
        blk_mq_tag_wakeup_all(tags, false);
        return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        int hwq = 0;

        if (q->mq_ops) {
                hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
                hwq = hctx->queue_num;
        }

        return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

        return page - orig_page;
}
