
TOMOYO Linux Cross Reference
Linux/block/blk-mq-tag.c


Diff markup

Differences between /block/blk-mq-tag.c (Version linux-4.17.19) and /block/blk-mq-tag.c (Version linux-4.7.10)
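
Markup key: "!!" flags a line that differs between the two versions; ">>" marks lines present only in linux-4.7.10 (right column); "<<" marks lines present only in linux-4.17.19 (left column). Long left-column lines are truncated to the fixed column width.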


  1 /*                                                  1 /*
  2  * Tag allocation using scalable bitmaps. Uses !!   2  * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
  3  * fairer distribution of tags between multipl !!   3  * over multiple cachelines to avoid ping-pong between multiple submitters
  4  * is used.                                    !!   4  * Uses rolling wakeups to avoid falling off
                                                   >>   5  * the scaling cliff when we run out of tags and have to start putting
                                                   >>   6  * submitters to sleep.
                                                   >>   7  *
                                                   >>   8  * Uses active queue tracking to support fairer distribution of tags
                                                   >>   9  * between multiple submitters when a shared tag map is used.
  5  *                                                 10  *
  6  * Copyright (C) 2013-2014 Jens Axboe              11  * Copyright (C) 2013-2014 Jens Axboe
  7  */                                                12  */
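
The "sparser bitmaps spread over multiple cachelines" in the 4.7.10 header refer to struct blk_align_bitmap, which pads each word of tag bits out to a full cacheline so that concurrent submitters tend not to contend on the same line. A sketch, roughly as defined in the blk-mq-tag.h of that era:

    /* 4.7-era layout sketch: one word of tag bits, alone in its cacheline. */
    struct blk_align_bitmap {
            unsigned long word;     /* tag bits: set == busy, clear == free */
            unsigned long depth;    /* usable bits in this word */
    } ____cacheline_aligned_in_smp;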
  8 #include <linux/kernel.h>                          13 #include <linux/kernel.h>
  9 #include <linux/module.h>                          14 #include <linux/module.h>
                                                   >>  15 #include <linux/random.h>
 10                                                    16 
 11 #include <linux/blk-mq.h>                          17 #include <linux/blk-mq.h>
 12 #include "blk.h"                                   18 #include "blk.h"
 13 #include "blk-mq.h"                                19 #include "blk-mq.h"
 14 #include "blk-mq-tag.h"                            20 #include "blk-mq-tag.h"
 15                                                    21 
                                                   >>  22 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
                                                   >>  23 {
                                                   >>  24         int i;
                                                   >>  25 
                                                   >>  26         for (i = 0; i < bt->map_nr; i++) {
                                                   >>  27                 struct blk_align_bitmap *bm = &bt->map[i];
                                                   >>  28                 int ret;
                                                   >>  29 
                                                   >>  30                 ret = find_first_zero_bit(&bm->word, bm->depth);
                                                   >>  31                 if (ret < bm->depth)
                                                   >>  32                         return true;
                                                   >>  33         }
                                                   >>  34 
                                                   >>  35         return false;
                                                   >>  36 }
                                                   >>  37 
 16 bool blk_mq_has_free_tags(struct blk_mq_tags *     38 bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 17 {                                                  39 {
 18         if (!tags)                                 40         if (!tags)
 19                 return true;                       41                 return true;
 20                                                    42 
 21         return sbitmap_any_bit_clear(&tags->bi !!  43         return bt_has_free_tags(&tags->bitmap_tags);
                                                   >>  44 }
                                                   >>  45 
                                                   >>  46 static inline int bt_index_inc(int index)
                                                   >>  47 {
                                                   >>  48         return (index + 1) & (BT_WAIT_QUEUES - 1);
                                                   >>  49 }
                                                   >>  50 
                                                   >>  51 static inline void bt_index_atomic_inc(atomic_t *index)
                                                   >>  52 {
                                                   >>  53         int old = atomic_read(index);
                                                   >>  54         int new = bt_index_inc(old);
                                                   >>  55         atomic_cmpxchg(index, old, new);
 22 }                                                  56 }
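
bt_index_inc() wraps with a mask rather than a modulo, which works only because BT_WAIT_QUEUES is a power of two (8 in this era's blk-mq-tag.h). A quick illustration under that assumption:

    /* Illustrative only, assuming BT_WAIT_QUEUES == 8: */
    int idx = 7;
    idx = (idx + 1) & (8 - 1);      /* == 0: index 7 wraps to 0, no divide */

bt_index_atomic_inc() applies the same step with a single cmpxchg and does not retry on failure: if the cmpxchg loses a race, another CPU already advanced the index, which is all that is needed to rotate waiters.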
 23                                                    57 
 24 /*                                                 58 /*
 25  * If a previously inactive queue goes active,     59  * If a previously inactive queue goes active, bump the active user count.
 26  */                                                60  */
 27 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *h     61 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 28 {                                                  62 {
 29         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hc     63         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 30             !test_and_set_bit(BLK_MQ_S_TAG_ACT     64             !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 31                 atomic_inc(&hctx->tags->active     65                 atomic_inc(&hctx->tags->active_queues);
 32                                                    66 
 33         return true;                               67         return true;
 34 }                                                  68 }
 35                                                    69 
 36 /*                                                 70 /*
 37  * Wakeup all potentially sleeping on tags         71  * Wakeup all potentially sleeping on tags
 38  */                                                72  */
 39 void blk_mq_tag_wakeup_all(struct blk_mq_tags      73 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 40 {                                                  74 {
 41         sbitmap_queue_wake_all(&tags->bitmap_t !!  75         struct blk_mq_bitmap_tags *bt;
 42         if (include_reserve)                   !!  76         int i, wake_index;
 43                 sbitmap_queue_wake_all(&tags-> !!  77 
                                                   >>  78         /*
                                                   >>  79          * Make sure all changes prior to this are visible from other CPUs.
                                                   >>  80          */
                                                   >>  81         smp_mb();
                                                   >>  82         bt = &tags->bitmap_tags;
                                                   >>  83         wake_index = atomic_read(&bt->wake_index);
                                                   >>  84         for (i = 0; i < BT_WAIT_QUEUES; i++) {
                                                   >>  85                 struct bt_wait_state *bs = &bt->bs[wake_index];
                                                   >>  86 
                                                   >>  87                 if (waitqueue_active(&bs->wait))
                                                   >>  88                         wake_up(&bs->wait);
                                                   >>  89 
                                                   >>  90                 wake_index = bt_index_inc(wake_index);
                                                   >>  91         }
                                                   >>  92 
                                                   >>  93         if (include_reserve) {
                                                   >>  94                 bt = &tags->breserved_tags;
                                                   >>  95                 if (waitqueue_active(&bt->bs[0].wait))
                                                   >>  96                         wake_up(&bt->bs[0].wait);
                                                   >>  97         }
 44 }                                                  98 }
 45                                                    99 
 46 /*                                                100 /*
 47  * If a previously busy queue goes inactive, p    101  * If a previously busy queue goes inactive, potential waiters could now
 48  * be allowed to queue. Wake them up and check    102  * be allowed to queue. Wake them up and check.
 49  */                                               103  */
 50 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *h    104 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 51 {                                                 105 {
 52         struct blk_mq_tags *tags = hctx->tags;    106         struct blk_mq_tags *tags = hctx->tags;
 53                                                   107 
 54         if (!test_and_clear_bit(BLK_MQ_S_TAG_A    108         if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 55                 return;                           109                 return;
 56                                                   110 
 57         atomic_dec(&tags->active_queues);         111         atomic_dec(&tags->active_queues);
 58                                                   112 
 59         blk_mq_tag_wakeup_all(tags, false);       113         blk_mq_tag_wakeup_all(tags, false);
 60 }                                                 114 }
 61                                                   115 
 62 /*                                                116 /*
 63  * For shared tag users, we track the number o    117  * For shared tag users, we track the number of currently active users
 64  * and attempt to provide a fair share of the     118  * and attempt to provide a fair share of the tag depth for each of them.
 65  */                                               119  */
 66 static inline bool hctx_may_queue(struct blk_m    120 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 67                                   struct sbitm !! 121                                   struct blk_mq_bitmap_tags *bt)
 68 {                                                 122 {
 69         unsigned int depth, users;                123         unsigned int depth, users;
 70                                                   124 
 71         if (!hctx || !(hctx->flags & BLK_MQ_F_    125         if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
 72                 return true;                      126                 return true;
 73         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hc    127         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 74                 return true;                      128                 return true;
 75                                                   129 
 76         /*                                        130         /*
 77          * Don't try dividing an ant              131          * Don't try dividing an ant
 78          */                                       132          */
 79         if (bt->sb.depth == 1)                 !! 133         if (bt->depth == 1)
 80                 return true;                      134                 return true;
 81                                                   135 
 82         users = atomic_read(&hctx->tags->activ    136         users = atomic_read(&hctx->tags->active_queues);
 83         if (!users)                               137         if (!users)
 84                 return true;                      138                 return true;
 85                                                   139 
 86         /*                                        140         /*
 87          * Allow at least some tags               141          * Allow at least some tags
 88          */                                       142          */
 89         depth = max((bt->sb.depth + users - 1) !! 143         depth = max((bt->depth + users - 1) / users, 4U);
 90         return atomic_read(&hctx->nr_active) <    144         return atomic_read(&hctx->nr_active) < depth;
 91 }                                                 145 }
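
The depth computed by hctx_may_queue() is a ceiling division of the bitmap depth by the number of active shared users, floored at four tags per queue. A worked example with illustrative numbers:

    /* Illustrative only: 128 total tags shared by 3 active queues. */
    unsigned int users = 3, map_depth = 128;
    unsigned int depth = max((map_depth + users - 1) / users, 4U);  /* 43 */
    /* With 60 active queues the ceiling is 3, so the floor of 4 wins. */

A hctx whose nr_active count has reached its share fails the allocation and waits, which is how fairness between shared-tag users is enforced.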
 92                                                   146 
 93 static int __blk_mq_get_tag(struct blk_mq_allo !! 147 static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
 94                             struct sbitmap_que !! 148                          bool nowrap)
 95 {                                                 149 {
 96         if (!(data->flags & BLK_MQ_REQ_INTERNA !! 150         int tag, org_last_tag = last_tag;
 97             !hctx_may_queue(data->hctx, bt))   !! 151 
 98                 return -1;                     !! 152         while (1) {
 99         if (data->shallow_depth)               !! 153                 tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
100                 return __sbitmap_queue_get_sha !! 154                 if (unlikely(tag >= bm->depth)) {
101         else                                   !! 155                         /*
102                 return __sbitmap_queue_get(bt) !! 156                          * We started with an offset, and we didn't reset the
                                                   >> 157                          * offset to 0 in a failure case, so start from 0 to
                                                   >> 158                          * exhaust the map.
                                                   >> 159                          */
                                                   >> 160                         if (org_last_tag && last_tag && !nowrap) {
                                                   >> 161                                 last_tag = org_last_tag = 0;
                                                   >> 162                                 continue;
                                                   >> 163                         }
                                                   >> 164                         return -1;
                                                   >> 165                 }
                                                   >> 166 
                                                   >> 167                 if (!test_and_set_bit(tag, &bm->word))
                                                   >> 168                         break;
                                                   >> 169 
                                                   >> 170                 last_tag = tag + 1;
                                                   >> 171                 if (last_tag >= bm->depth - 1)
                                                   >> 172                         last_tag = 0;
                                                   >> 173         }
                                                   >> 174 
                                                   >> 175         return tag;
103 }                                                 176 }
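
__bt_get_word() restarts its search from bit 0 only when it began at a nonzero offset and wrapping is allowed; BT_ALLOC_RR passes nowrap == true, which keeps round-robin allocation strictly ascending. A trace with an illustrative 8-bit word:

    /* last_tag == 5, bits 5..7 busy: find_next_zero_bit() returns 8 >= depth.
     * FIFO policy: org_last_tag && last_tag && !nowrap, so restart at bit 0
     * and claim a free low bit.  RR policy: return -1 so the caller moves on
     * to the next word instead of wrapping within this one. */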
104                                                   177 
105 unsigned int blk_mq_get_tag(struct blk_mq_allo !! 178 #define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
                                                   >> 179 
                                                   >> 180 /*
                                                   >> 181  * Straightforward bitmap tag implementation, where each bit is a tag
                                                   >> 182  * (cleared == free, and set == busy). The small twist is using per-cpu
                                                   >> 183  * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
                                                   >> 184  * contexts. This enables us to drastically limit the space searched,
                                                   >> 185  * without dirtying an extra shared cacheline like we would if we stored
                                                   >> 186  * the cache value inside the shared blk_mq_bitmap_tags structure. On top
                                                   >> 187  * of that, each word of tags is in a separate cacheline. This means that
                                                   >> 188  * multiple users will tend to stick to different cachelines, at least
                                                   >> 189  * until the map is exhausted.
                                                   >> 190  */
                                                   >> 191 static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                                                   >> 192                     unsigned int *tag_cache, struct blk_mq_tags *tags)
106 {                                                 193 {
107         struct blk_mq_tags *tags = blk_mq_tags !! 194         unsigned int last_tag, org_last_tag;
108         struct sbitmap_queue *bt;              !! 195         int index, i, tag;
109         struct sbq_wait_state *ws;             << 
110         DEFINE_WAIT(wait);                     << 
111         unsigned int tag_offset;               << 
112         bool drop_ctx;                         << 
113         int tag;                               << 
114                                                   196 
115         if (data->flags & BLK_MQ_REQ_RESERVED) !! 197         if (!hctx_may_queue(hctx, bt))
116                 if (unlikely(!tags->nr_reserve !! 198                 return -1;
117                         WARN_ON_ONCE(1);       !! 199 
118                         return BLK_MQ_TAG_FAIL !! 200         last_tag = org_last_tag = *tag_cache;
                                                   >> 201         index = TAG_TO_INDEX(bt, last_tag);
                                                   >> 202 
                                                   >> 203         for (i = 0; i < bt->map_nr; i++) {
                                                   >> 204                 tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
                                                   >> 205                                     BT_ALLOC_RR(tags));
                                                   >> 206                 if (tag != -1) {
                                                   >> 207                         tag += (index << bt->bits_per_word);
                                                   >> 208                         goto done;
                                                   >> 209                 }
                                                   >> 210 
                                                   >> 211                 /*
                                                   >> 212                  * Jump to next index, and reset the last tag to be the
                                                   >> 213                  * first tag of that index
                                                   >> 214                  */
                                                   >> 215                 index++;
                                                   >> 216                 last_tag = (index << bt->bits_per_word);
                                                   >> 217 
                                                   >> 218                 if (index >= bt->map_nr) {
                                                   >> 219                         index = 0;
                                                   >> 220                         last_tag = 0;
119                 }                                 221                 }
120                 bt = &tags->breserved_tags;    << 
121                 tag_offset = 0;                << 
122         } else {                               << 
123                 bt = &tags->bitmap_tags;       << 
124                 tag_offset = tags->nr_reserved << 
125         }                                         222         }
126                                                   223 
127         tag = __blk_mq_get_tag(data, bt);      !! 224         *tag_cache = 0;
                                                   >> 225         return -1;
                                                   >> 226 
                                                   >> 227         /*
                                                   >> 228          * Only update the cache from the allocation path, if we ended
                                                   >> 229          * up using the specific cached tag.
                                                   >> 230          */
                                                   >> 231 done:
                                                   >> 232         if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
                                                   >> 233                 last_tag = tag + 1;
                                                   >> 234                 if (last_tag >= bt->depth - 1)
                                                   >> 235                         last_tag = 0;
                                                   >> 236 
                                                   >> 237                 *tag_cache = last_tag;
                                                   >> 238         }
                                                   >> 239 
                                                   >> 240         return tag;
                                                   >> 241 }
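
The *tag_cache that __bt_get() reads and conditionally writes back is the per-software-queue cache described in the comment above: callers pass &data->ctx->last_tag, a blk_mq_ctx field kept in its own cacheline so updating it does not dirty the shared bitmap. A sketch of the relevant field, roughly per this era's block/blk-mq.h:

    struct blk_mq_ctx {
            /* ... */
            unsigned int last_tag ____cacheline_aligned_in_smp;
            /* ... */
    };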
                                                   >> 242 
                                                   >> 243 static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                                   >> 244                                          struct blk_mq_hw_ctx *hctx)
                                                   >> 245 {
                                                   >> 246         struct bt_wait_state *bs;
                                                   >> 247         int wait_index;
                                                   >> 248 
                                                   >> 249         if (!hctx)
                                                   >> 250                 return &bt->bs[0];
                                                   >> 251 
                                                   >> 252         wait_index = atomic_read(&hctx->wait_index);
                                                   >> 253         bs = &bt->bs[wait_index];
                                                   >> 254         bt_index_atomic_inc(&hctx->wait_index);
                                                   >> 255         return bs;
                                                   >> 256 }
                                                   >> 257 
                                                   >> 258 static int bt_get(struct blk_mq_alloc_data *data,
                                                   >> 259                 struct blk_mq_bitmap_tags *bt,
                                                   >> 260                 struct blk_mq_hw_ctx *hctx,
                                                   >> 261                 unsigned int *last_tag, struct blk_mq_tags *tags)
                                                   >> 262 {
                                                   >> 263         struct bt_wait_state *bs;
                                                   >> 264         DEFINE_WAIT(wait);
                                                   >> 265         int tag;
                                                   >> 266 
                                                   >> 267         tag = __bt_get(hctx, bt, last_tag, tags);
128         if (tag != -1)                            268         if (tag != -1)
129                 goto found_tag;                !! 269                 return tag;
130                                                   270 
131         if (data->flags & BLK_MQ_REQ_NOWAIT)      271         if (data->flags & BLK_MQ_REQ_NOWAIT)
132                 return BLK_MQ_TAG_FAIL;        !! 272                 return -1;
133                                                   273 
134         ws = bt_wait_ptr(bt, data->hctx);      !! 274         bs = bt_wait_ptr(bt, hctx);
135         drop_ctx = data->ctx == NULL;          << 
136         do {                                      275         do {
                                                   >> 276                 prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
                                                   >> 277 
                                                   >> 278                 tag = __bt_get(hctx, bt, last_tag, tags);
                                                   >> 279                 if (tag != -1)
                                                   >> 280                         break;
                                                   >> 281 
137                 /*                                282                 /*
138                  * We're out of tags on this h    283                  * We're out of tags on this hardware queue, kick any
139                  * pending IO submits before g    284                  * pending IO submits before going to sleep waiting for
140                  * some to complete.           !! 285                  * some to complete. Note that hctx can be NULL here for
                                                   >> 286                  * reserved tag allocation.
141                  */                               287                  */
142                 blk_mq_run_hw_queue(data->hctx !! 288                 if (hctx)
                                                   >> 289                         blk_mq_run_hw_queue(hctx, false);
143                                                   290 
144                 /*                                291                 /*
145                  * Retry tag allocation after     292                  * Retry tag allocation after running the hardware queue,
146                  * as running the queue may al    293                  * as running the queue may also have found completions.
147                  */                               294                  */
148                 tag = __blk_mq_get_tag(data, b !! 295                 tag = __bt_get(hctx, bt, last_tag, tags);
149                 if (tag != -1)                    296                 if (tag != -1)
150                         break;                    297                         break;
151                                                   298 
152                 prepare_to_wait_exclusive(&ws- !! 299                 blk_mq_put_ctx(data->ctx);
153                                                << 
154                                                << 
155                 tag = __blk_mq_get_tag(data, b << 
156                 if (tag != -1)                 << 
157                         break;                 << 
158                                                << 
159                 if (data->ctx)                 << 
160                         blk_mq_put_ctx(data->c << 
161                                                   300 
162                 io_schedule();                    301                 io_schedule();
163                                                   302 
164                 data->ctx = blk_mq_get_ctx(dat    303                 data->ctx = blk_mq_get_ctx(data->q);
165                 data->hctx = blk_mq_map_queue( !! 304                 data->hctx = data->q->mq_ops->map_queue(data->q,
166                 tags = blk_mq_tags_from_data(d !! 305                                 data->ctx->cpu);
167                 if (data->flags & BLK_MQ_REQ_R !! 306                 if (data->flags & BLK_MQ_REQ_RESERVED) {
168                         bt = &tags->breserved_ !! 307                         bt = &data->hctx->tags->breserved_tags;
169                 else                           !! 308                 } else {
170                         bt = &tags->bitmap_tag !! 309                         last_tag = &data->ctx->last_tag;
171                                                !! 310                         hctx = data->hctx;
172                 finish_wait(&ws->wait, &wait); !! 311                         bt = &hctx->tags->bitmap_tags;
173                 ws = bt_wait_ptr(bt, data->hct !! 312                 }
                                                   >> 313                 finish_wait(&bs->wait, &wait);
                                                   >> 314                 bs = bt_wait_ptr(bt, hctx);
174         } while (1);                              315         } while (1);
175                                                   316 
176         if (drop_ctx && data->ctx)             !! 317         finish_wait(&bs->wait, &wait);
177                 blk_mq_put_ctx(data->ctx);     !! 318         return tag;
                                                   >> 319 }
                                                   >> 320 
                                                   >> 321 static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
                                                   >> 322 {
                                                   >> 323         int tag;
178                                                   324 
179         finish_wait(&ws->wait, &wait);         !! 325         tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
                                                   >> 326                         &data->ctx->last_tag, data->hctx->tags);
                                                   >> 327         if (tag >= 0)
                                                   >> 328                 return tag + data->hctx->tags->nr_reserved_tags;
180                                                   329 
181 found_tag:                                     !! 330         return BLK_MQ_TAG_FAIL;
182         return tag + tag_offset;               << 
183 }                                                 331 }
184                                                   332 
185 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx !! 333 static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
186                     struct blk_mq_ctx *ctx, un << 
187 {                                                 334 {
188         if (!blk_mq_tag_is_reserved(tags, tag) !! 335         int tag, zero = 0;
189                 const int real_tag = tag - tag << 
190                                                   336 
191                 BUG_ON(real_tag >= tags->nr_ta !! 337         if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
192                 sbitmap_queue_clear(&tags->bit !! 338                 WARN_ON_ONCE(1);
193         } else {                               !! 339                 return BLK_MQ_TAG_FAIL;
194                 BUG_ON(tag >= tags->nr_reserve << 
195                 sbitmap_queue_clear(&tags->bre << 
196         }                                         340         }
                                                   >> 341 
                                                   >> 342         tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
                                                   >> 343                 data->hctx->tags);
                                                   >> 344         if (tag < 0)
                                                   >> 345                 return BLK_MQ_TAG_FAIL;
                                                   >> 346 
                                                   >> 347         return tag;
197 }                                                 348 }
198                                                   349 
199 struct bt_iter_data {                          !! 350 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
200         struct blk_mq_hw_ctx *hctx;            !! 351 {
201         busy_iter_fn *fn;                      !! 352         if (data->flags & BLK_MQ_REQ_RESERVED)
202         void *data;                            !! 353                 return __blk_mq_get_reserved_tag(data);
203         bool reserved;                         !! 354         return __blk_mq_get_tag(data);
204 };                                             !! 355 }
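
In both versions blk_mq_get_tag() is the single allocation entry point; the caller fills in a blk_mq_alloc_data and checks the result against BLK_MQ_TAG_FAIL (-1U). A hedged caller-side sketch, with the setup abbreviated:

    struct blk_mq_alloc_data data;   /* .q/.ctx/.hctx set up beforehand by
                                      * the blk-mq request-allocation path */
    unsigned int tag;

    data.flags = BLK_MQ_REQ_NOWAIT;  /* fail rather than sleep on tags */
    tag = blk_mq_get_tag(&data);
    if (tag == BLK_MQ_TAG_FAIL)      /* out of tags, not allowed to wait */
            return NULL;

With BLK_MQ_REQ_RESERVED set, the tag comes from breserved_tags instead, and the returned value is not offset by nr_reserved_tags.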
205                                                   356 
206 static bool bt_iter(struct sbitmap *bitmap, un !! 357 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
207 {                                                 358 {
208         struct bt_iter_data *iter_data = data; !! 359         int i, wake_index;
209         struct blk_mq_hw_ctx *hctx = iter_data << 
210         struct blk_mq_tags *tags = hctx->tags; << 
211         bool reserved = iter_data->reserved;   << 
212         struct request *rq;                    << 
213                                                   360 
214         if (!reserved)                         !! 361         wake_index = atomic_read(&bt->wake_index);
215                 bitnr += tags->nr_reserved_tag !! 362         for (i = 0; i < BT_WAIT_QUEUES; i++) {
216         rq = tags->rqs[bitnr];                 !! 363                 struct bt_wait_state *bs = &bt->bs[wake_index];
217                                                   364 
218         /*                                     !! 365                 if (waitqueue_active(&bs->wait)) {
219          * We can hit rq == NULL here, because !! 366                         int o = atomic_read(&bt->wake_index);
 220          * test and set the bit before assignin !! 367                         if (wake_index != o)
221          */                                    !! 368                                 atomic_cmpxchg(&bt->wake_index, o, wake_index);
222         if (rq && rq->q == hctx->queue)        !! 369 
223                 iter_data->fn(hctx, rq, iter_d !! 370                         return bs;
224         return true;                           !! 371                 }
                                                   >> 372 
                                                   >> 373                 wake_index = bt_index_inc(wake_index);
                                                   >> 374         }
                                                   >> 375 
                                                   >> 376         return NULL;
225 }                                                 377 }
226                                                   378 
227 static void bt_for_each(struct blk_mq_hw_ctx * !! 379 static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
228                         busy_iter_fn *fn, void << 
229 {                                                 380 {
230         struct bt_iter_data iter_data = {      !! 381         const int index = TAG_TO_INDEX(bt, tag);
231                 .hctx = hctx,                  !! 382         struct bt_wait_state *bs;
232                 .fn = fn,                      !! 383         int wait_cnt;
233                 .data = data,                  << 
234                 .reserved = reserved,          << 
235         };                                     << 
236                                                   384 
237         sbitmap_for_each_set(&bt->sb, bt_iter, !! 385         clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
                                                   >> 386 
                                                   >> 387         /* Ensure that the wait list checks occur after clear_bit(). */
                                                   >> 388         smp_mb();
                                                   >> 389 
                                                   >> 390         bs = bt_wake_ptr(bt);
                                                   >> 391         if (!bs)
                                                   >> 392                 return;
                                                   >> 393 
                                                   >> 394         wait_cnt = atomic_dec_return(&bs->wait_cnt);
                                                   >> 395         if (unlikely(wait_cnt < 0))
                                                   >> 396                 wait_cnt = atomic_inc_return(&bs->wait_cnt);
                                                   >> 397         if (wait_cnt == 0) {
                                                   >> 398                 atomic_add(bt->wake_cnt, &bs->wait_cnt);
                                                   >> 399                 bt_index_atomic_inc(&bt->wake_index);
                                                   >> 400                 wake_up(&bs->wait);
                                                   >> 401         }
238 }                                                 402 }
239                                                   403 
240 struct bt_tags_iter_data {                     !! 404 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
241         struct blk_mq_tags *tags;              !! 405                     unsigned int *last_tag)
242         busy_tag_iter_fn *fn;                  !! 406 {
243         void *data;                            !! 407         struct blk_mq_tags *tags = hctx->tags;
244         bool reserved;                         !! 408 
245 };                                             !! 409         if (tag >= tags->nr_reserved_tags) {
246                                                !! 410                 const int real_tag = tag - tags->nr_reserved_tags;
247 static bool bt_tags_iter(struct sbitmap *bitma !! 411 
248 {                                              !! 412                 BUG_ON(real_tag >= tags->nr_tags);
249         struct bt_tags_iter_data *iter_data =  !! 413                 bt_clear_tag(&tags->bitmap_tags, real_tag);
250         struct blk_mq_tags *tags = iter_data-> !! 414                 if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
251         bool reserved = iter_data->reserved;   !! 415                         *last_tag = real_tag;
                                                   >> 416         } else {
                                                   >> 417                 BUG_ON(tag >= tags->nr_reserved_tags);
                                                   >> 418                 bt_clear_tag(&tags->breserved_tags, tag);
                                                   >> 419         }
                                                   >> 420 }
                                                   >> 421 
                                                   >> 422 static void bt_for_each(struct blk_mq_hw_ctx *hctx,
                                                   >> 423                 struct blk_mq_bitmap_tags *bt, unsigned int off,
                                                   >> 424                 busy_iter_fn *fn, void *data, bool reserved)
                                                   >> 425 {
252         struct request *rq;                       426         struct request *rq;
                                                   >> 427         int bit, i;
253                                                   428 
254         if (!reserved)                         !! 429         for (i = 0; i < bt->map_nr; i++) {
255                 bitnr += tags->nr_reserved_tag !! 430                 struct blk_align_bitmap *bm = &bt->map[i];
256                                                   431 
257         /*                                     !! 432                 for (bit = find_first_bit(&bm->word, bm->depth);
258          * We can hit rq == NULL here, because !! 433                      bit < bm->depth;
 259          * test and set the bit before assignin !! 434                      bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
260          */                                    !! 435                         rq = hctx->tags->rqs[off + bit];
261         rq = tags->rqs[bitnr];                 !! 436                         if (rq->q == hctx->queue)
262         if (rq)                                !! 437                                 fn(hctx, rq, data, reserved);
263                 iter_data->fn(rq, iter_data->d !! 438                 }
264                                                   439 
265         return true;                           !! 440                 off += (1 << bt->bits_per_word);
                                                   >> 441         }
266 }                                                 442 }
267                                                   443 
268 static void bt_tags_for_each(struct blk_mq_tag !! 444 static void bt_tags_for_each(struct blk_mq_tags *tags,
269                              busy_tag_iter_fn  !! 445                 struct blk_mq_bitmap_tags *bt, unsigned int off,
                                                   >> 446                 busy_tag_iter_fn *fn, void *data, bool reserved)
270 {                                                 447 {
271         struct bt_tags_iter_data iter_data = { !! 448         struct request *rq;
272                 .tags = tags,                  !! 449         int bit, i;
273                 .fn = fn,                      << 
274                 .data = data,                  << 
275                 .reserved = reserved,          << 
276         };                                     << 
277                                                   450 
278         if (tags->rqs)                         !! 451         if (!tags->rqs)
279                 sbitmap_for_each_set(&bt->sb,  !! 452                 return;
                                                   >> 453         for (i = 0; i < bt->map_nr; i++) {
                                                   >> 454                 struct blk_align_bitmap *bm = &bt->map[i];
                                                   >> 455 
                                                   >> 456                 for (bit = find_first_bit(&bm->word, bm->depth);
                                                   >> 457                      bit < bm->depth;
                                                   >> 458                      bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
                                                   >> 459                         rq = tags->rqs[off + bit];
                                                   >> 460                         fn(rq, data, reserved);
                                                   >> 461                 }
                                                   >> 462 
                                                   >> 463                 off += (1 << bt->bits_per_word);
                                                   >> 464         }
280 }                                                 465 }
281                                                   466 
282 static void blk_mq_all_tag_busy_iter(struct bl    467 static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
283                 busy_tag_iter_fn *fn, void *pr    468                 busy_tag_iter_fn *fn, void *priv)
284 {                                                 469 {
285         if (tags->nr_reserved_tags)               470         if (tags->nr_reserved_tags)
286                 bt_tags_for_each(tags, &tags-> !! 471                 bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
287         bt_tags_for_each(tags, &tags->bitmap_t !! 472         bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                                                   >> 473                         false);
288 }                                                 474 }
289                                                   475 
290 void blk_mq_tagset_busy_iter(struct blk_mq_tag    476 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
291                 busy_tag_iter_fn *fn, void *pr    477                 busy_tag_iter_fn *fn, void *priv)
292 {                                                 478 {
293         int i;                                    479         int i;
294                                                   480 
295         for (i = 0; i < tagset->nr_hw_queues;     481         for (i = 0; i < tagset->nr_hw_queues; i++) {
296                 if (tagset->tags && tagset->ta    482                 if (tagset->tags && tagset->tags[i])
297                         blk_mq_all_tag_busy_it    483                         blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
298         }                                         484         }
299 }                                                 485 }
300 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);           486 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
301                                                   487 
302 int blk_mq_tagset_iter(struct blk_mq_tag_set * !! 488 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
303                          int (fn)(void *, stru !! 489                 void *priv)
304 {                                                 490 {
305         int i, j, ret = 0;                     !! 491         struct blk_mq_hw_ctx *hctx;
                                                   >> 492         int i;
306                                                   493 
307         if (WARN_ON_ONCE(!fn))                 << 
308                 goto out;                      << 
309                                                   494 
310         for (i = 0; i < set->nr_hw_queues; i++ !! 495         queue_for_each_hw_ctx(q, hctx, i) {
311                 struct blk_mq_tags *tags = set !! 496                 struct blk_mq_tags *tags = hctx->tags;
312                                                   497 
313                 if (!tags)                     !! 498                 /*
                                                   >> 499                  * If no software queues are currently mapped to this
                                                   >> 500                  * hardware queue, there's nothing to check
                                                   >> 501                  */
                                                   >> 502                 if (!blk_mq_hw_queue_mapped(hctx))
314                         continue;                 503                         continue;
315                                                   504 
316                 for (j = 0; j < tags->nr_tags; !! 505                 if (tags->nr_reserved_tags)
317                         if (!tags->static_rqs[ !! 506                         bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
318                                 continue;      !! 507                 bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
                                                   >> 508                       false);
                                                   >> 509         }
319                                                   510 
320                         ret = fn(data, tags->s !! 511 }
321                         if (ret)               !! 512 
322                                 goto out;      !! 513 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
                                                   >> 514 {
                                                   >> 515         unsigned int i, used;
                                                   >> 516 
                                                   >> 517         for (i = 0, used = 0; i < bt->map_nr; i++) {
                                                   >> 518                 struct blk_align_bitmap *bm = &bt->map[i];
                                                   >> 519 
                                                   >> 520                 used += bitmap_weight(&bm->word, bm->depth);
                                                   >> 521         }
                                                   >> 522 
                                                   >> 523         return bt->depth - used;
                                                   >> 524 }
                                                   >> 525 
                                                   >> 526 static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                                                   >> 527                             unsigned int depth)
                                                   >> 528 {
                                                   >> 529         unsigned int tags_per_word = 1U << bt->bits_per_word;
                                                   >> 530         unsigned int map_depth = depth;
                                                   >> 531 
                                                   >> 532         if (depth) {
                                                   >> 533                 int i;
                                                   >> 534 
                                                   >> 535                 for (i = 0; i < bt->map_nr; i++) {
                                                   >> 536                         bt->map[i].depth = min(map_depth, tags_per_word);
                                                   >> 537                         map_depth -= bt->map[i].depth;
323                 }                                 538                 }
324         }                                         539         }
325                                                   540 
326 out:                                           !! 541         bt->wake_cnt = BT_WAIT_BATCH;
327         return ret;                            !! 542         if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
                                                   >> 543                 bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
                                                   >> 544 
                                                   >> 545         bt->depth = depth;
328 }                                                 546 }
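
bt_update_count() splits the depth across the per-cacheline words and derives the wakeup batch from it (BT_WAIT_QUEUES and BT_WAIT_BATCH are both 8 in this era's header). Tracing illustrative numbers:

    /* depth == 70, tags_per_word == 64:
     *   map[0].depth = min(70, 64) == 64;  map[1].depth = 70 - 64 == 6.
     * wake_cnt: starts at BT_WAIT_BATCH (8); 70 / 8 == 8, so it stays 8.
     * At depth == 20: 20 / 8 == 2, so wake_cnt = max(1, 2) == 2 and sleepers
     * are woken after every two freed tags instead of every eight. */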
329 EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);         << 
330                                                   547 
331 void blk_mq_queue_tag_busy_iter(struct request !! 548 static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
332                 void *priv)                    !! 549                         int node, bool reserved)
333 {                                                 550 {
334         struct blk_mq_hw_ctx *hctx;            << 
335         int i;                                    551         int i;
336                                                   552 
                                                   >> 553         bt->bits_per_word = ilog2(BITS_PER_LONG);
337                                                   554 
338         queue_for_each_hw_ctx(q, hctx, i) {    !! 555         /*
339                 struct blk_mq_tags *tags = hct !! 556          * Depth can be zero for reserved tags, that's not a failure
                                                   >> 557          * condition.
                                                   >> 558          */
                                                   >> 559         if (depth) {
                                                   >> 560                 unsigned int nr, tags_per_word;
                                                   >> 561 
                                                   >> 562                 tags_per_word = (1 << bt->bits_per_word);
340                                                   563 
341                 /*                                564                 /*
 342                  * If no software queues are   !! 565                  * If the tag space is small, shrink the number of tags
343                  * hardware queue, there's not !! 566                  * per word so we spread over a few cachelines, at least.
                                                   >> 567                  * If less than 4 tags, just forget about it, it's not
                                                   >> 568                  * going to work optimally anyway.
344                  */                               569                  */
345                 if (!blk_mq_hw_queue_mapped(hc !! 570                 if (depth >= 4) {
346                         continue;              !! 571                         while (tags_per_word * 4 > depth) {
                                                   >> 572                                 bt->bits_per_word--;
                                                   >> 573                                 tags_per_word = (1 << bt->bits_per_word);
                                                   >> 574                         }
                                                   >> 575                 }
347                                                   576 
348                 if (tags->nr_reserved_tags)    !! 577                 nr = ALIGN(depth, tags_per_word) / tags_per_word;
349                         bt_for_each(hctx, &tag !! 578                 bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
350                 bt_for_each(hctx, &tags->bitma !! 579                                                 GFP_KERNEL, node);
                                                   >> 580                 if (!bt->map)
                                                   >> 581                         return -ENOMEM;
                                                   >> 582 
                                                   >> 583                 bt->map_nr = nr;
                                                   >> 584         }
                                                   >> 585 
                                                   >> 586         bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
                                                   >> 587         if (!bt->bs) {
                                                   >> 588                 kfree(bt->map);
                                                   >> 589                 bt->map = NULL;
                                                   >> 590                 return -ENOMEM;
351         }                                         591         }
352                                                   592 
                                                   >> 593         bt_update_count(bt, depth);
                                                   >> 594 
                                                   >> 595         for (i = 0; i < BT_WAIT_QUEUES; i++) {
                                                   >> 596                 init_waitqueue_head(&bt->bs[i].wait);
                                                   >> 597                 atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
                                                   >> 598         }
                                                   >> 599 
                                                   >> 600         return 0;
353 }                                                 601 }
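
The shrink loop in bt_alloc() halves tags_per_word until at least four words are needed, so even a small tag space is spread over several cachelines. Tracing depth == 32 on a 64-bit build:

    /* tags_per_word: 64 (64*4 > 32) -> 32 -> 16 -> 8; 8*4 == 32 stops the
     * loop.  nr = ALIGN(32, 8) / 8 == 4 blk_align_bitmap words, each on its
     * own cacheline. */

Depths below four skip the shrinking entirely: with so few tags there is nothing worth spreading.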
354                                                   602 
355 static int bt_alloc(struct sbitmap_queue *bt,  !! 603 static void bt_free(struct blk_mq_bitmap_tags *bt)
356                     bool round_robin, int node << 
357 {                                                 604 {
358         return sbitmap_queue_init_node(bt, dep !! 605         kfree(bt->map);
359                                        node);  !! 606         kfree(bt->bs);
360 }                                                 607 }
361                                                   608 
362 static struct blk_mq_tags *blk_mq_init_bitmap_    609 static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
363                                                   610                                                    int node, int alloc_policy)
364 {                                                 611 {
365         unsigned int depth = tags->nr_tags - t    612         unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
366         bool round_robin = alloc_policy == BLK << 
367                                                   613 
368         if (bt_alloc(&tags->bitmap_tags, depth !! 614         tags->alloc_policy = alloc_policy;
369                 goto free_tags;                !! 615 
370         if (bt_alloc(&tags->breserved_tags, ta !! 616         if (bt_alloc(&tags->bitmap_tags, depth, node, false))
371                      node))                    !! 617                 goto enomem;
372                 goto free_bitmap_tags;         !! 618         if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                                                   >> 619                 goto enomem;
373                                                   620 
374         return tags;                              621         return tags;
375 free_bitmap_tags:                              !! 622 enomem:
376         sbitmap_queue_free(&tags->bitmap_tags) !! 623         bt_free(&tags->bitmap_tags);
377 free_tags:                                     << 
378         kfree(tags);                              624         kfree(tags);
379         return NULL;                              625         return NULL;
380 }                                                 626 }
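
In the 4.17 variant on the left, all of this per-word sizing moves into the sbitmap library: the -1 shift passed by bt_alloc() lets sbitmap pick the word granularity itself, and the round_robin flag carries the allocation policy. A hedged sketch of the sbitmap_queue calls involved, written as a kernel-context fragment with a hypothetical depth of 64 and FIFO allocation:

    struct sbitmap_queue bt;
    int tag;

    /* depth 64, shift -1 (let sbitmap choose), FIFO (round_robin=false) */
    if (sbitmap_queue_init_node(&bt, 64, -1, false, GFP_KERNEL, NUMA_NO_NODE))
            return -ENOMEM;

    tag = __sbitmap_queue_get(&bt);         /* -1 once the map is exhausted */
    if (tag >= 0)
            sbitmap_queue_clear(&bt, tag, raw_smp_processor_id());

    sbitmap_queue_free(&bt);
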
381                                                   627 
382 struct blk_mq_tags *blk_mq_init_tags(unsigned     628 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
383                                      unsigned     629                                      unsigned int reserved_tags,
384                                      int node,    630                                      int node, int alloc_policy)
385 {                                                 631 {
386         struct blk_mq_tags *tags;                 632         struct blk_mq_tags *tags;
387                                                   633 
388         if (total_tags > BLK_MQ_TAG_MAX) {        634         if (total_tags > BLK_MQ_TAG_MAX) {
389                 pr_err("blk-mq: tag depth too     635                 pr_err("blk-mq: tag depth too large\n");
390                 return NULL;                      636                 return NULL;
391         }                                         637         }
392                                                   638 
393         tags = kzalloc_node(sizeof(*tags), GFP    639         tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
394         if (!tags)                                640         if (!tags)
395                 return NULL;                      641                 return NULL;
396                                                   642 
                                                   >> 643         if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
                                                   >> 644                 kfree(tags);
                                                   >> 645                 return NULL;
                                                   >> 646         }
                                                   >> 647 
397         tags->nr_tags = total_tags;               648         tags->nr_tags = total_tags;
398         tags->nr_reserved_tags = reserved_tags    649         tags->nr_reserved_tags = reserved_tags;
399                                                   650 
400         return blk_mq_init_bitmap_tags(tags, n    651         return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
401 }                                                 652 }
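
For illustration, a call into this function, in the style of what blk_mq_alloc_rq_map() in blk-mq.c does for a driver's tag set, might look like the following; the depths are hypothetical, and BLK_TAG_ALLOC_FIFO is the default entry of the BLK_TAG_ALLOC_* policy enum:

    struct blk_mq_tags *tags;

    /* 256 tags total, 1 reserved (e.g. for internal commands) */
    tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
    if (!tags)
            return -ENOMEM;
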
402                                                   653 
403 void blk_mq_free_tags(struct blk_mq_tags *tags    654 void blk_mq_free_tags(struct blk_mq_tags *tags)
404 {                                                 655 {
405         sbitmap_queue_free(&tags->bitmap_tags) !! 656         bt_free(&tags->bitmap_tags);
406         sbitmap_queue_free(&tags->breserved_ta !! 657         bt_free(&tags->breserved_tags);
                                                   >> 658         free_cpumask_var(tags->cpumask);
407         kfree(tags);                              659         kfree(tags);
408 }                                                 660 }
409                                                   661 
410 int blk_mq_tag_update_depth(struct blk_mq_hw_c !! 662 void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
411                             struct blk_mq_tags << 
412                             bool can_grow)     << 
413 {                                                 663 {
414         struct blk_mq_tags *tags = *tagsptr;   !! 664         unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
415                                                   665 
416         if (tdepth <= tags->nr_reserved_tags)  !! 666         *tag = prandom_u32() % depth;
417                 return -EINVAL;                !! 667 }
418                                                   668 
                                                   >> 669 int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
                                                   >> 670 {
419         tdepth -= tags->nr_reserved_tags;         671         tdepth -= tags->nr_reserved_tags;
                                                   >> 672         if (tdepth > tags->nr_tags)
                                                   >> 673                 return -EINVAL;
420                                                   674 
421         /*                                        675         /*
422          * If we are allowed to grow beyond th !! 676          * Don't need (or can't) update reserved tags here, they remain
423          * a new set of tags before freeing th !! 677          * static and should never need resizing.
424          */                                       678          */
425         if (tdepth > tags->nr_tags) {          !! 679         bt_update_count(&tags->bitmap_tags, tdepth);
426                 struct blk_mq_tag_set *set = h !! 680         blk_mq_tag_wakeup_all(tags, false);
427                 struct blk_mq_tags *new;       << 
428                 bool ret;                      << 
429                                                << 
430                 if (!can_grow)                 << 
431                         return -EINVAL;        << 
432                                                << 
433                 /*                             << 
434                  * We need some sort of upper  << 
435                  * no valid use cases should r << 
436                  */                            << 
437                 if (tdepth > 16 * BLKDEV_MAX_R << 
438                         return -EINVAL;        << 
439                                                << 
440                 new = blk_mq_alloc_rq_map(set, << 
441                 if (!new)                      << 
442                         return -ENOMEM;        << 
443                 ret = blk_mq_alloc_rqs(set, ne << 
444                 if (ret) {                     << 
445                         blk_mq_free_rq_map(new << 
446                         return -ENOMEM;        << 
447                 }                              << 
448                                                << 
449                 blk_mq_free_rqs(set, *tagsptr, << 
450                 blk_mq_free_rq_map(*tagsptr);  << 
451                 *tagsptr = new;                << 
452         } else {                               << 
453                 /*                             << 
454                  * Don't need (or can't) updat << 
455                  * remain static and should ne << 
456                  */                            << 
457                 sbitmap_queue_resize(&tags->bi << 
458         }                                      << 
459                                                << 
460         return 0;                                 681         return 0;
461 }                                                 682 }
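
For context on can_grow: the caller in this era, blk_mq_update_nr_requests() in blk-mq.c, only permits growing past the original depth for scheduler tags, since driver tags are bounded by what the hardware advertised. Paraphrased, not verbatim:

    queue_for_each_hw_ctx(q, hctx, i) {
            if (!hctx->tags)
                    continue;
            if (!hctx->sched_tags)
                    /* driver tags: resize in place only */
                    ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
                                                  nr, false);
            else
                    /* scheduler tags: may allocate a larger map */
                    ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
                                                  nr, true);
            if (ret)
                    break;
    }
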
462                                                   683 
463 /**                                               684 /**
464  * blk_mq_unique_tag() - return a tag that is     685  * blk_mq_unique_tag() - return a tag that is unique queue-wide
465  * @rq: request for which to compute a unique     686  * @rq: request for which to compute a unique tag
466  *                                                687  *
467  * The tag field in struct request is unique p    688  * The tag field in struct request is unique per hardware queue but not over
468  * all hardware queues. Hence this function th    689  * all hardware queues. Hence this function that returns a tag with the
469  * hardware context index in the upper bits an    690  * hardware context index in the upper bits and the per hardware queue tag in
470  * the lower bits.                                691  * the lower bits.
471  *                                                692  *
472  * Note: When called for a request that is que    693  * Note: When called for a request that is queued on a non-multiqueue request
473  * queue, the hardware context index is set to    694  * queue, the hardware context index is set to zero.
474  */                                               695  */
475 u32 blk_mq_unique_tag(struct request *rq)         696 u32 blk_mq_unique_tag(struct request *rq)
476 {                                                 697 {
477         struct request_queue *q = rq->q;          698         struct request_queue *q = rq->q;
478         struct blk_mq_hw_ctx *hctx;               699         struct blk_mq_hw_ctx *hctx;
479         int hwq = 0;                              700         int hwq = 0;
480                                                   701 
481         if (q->mq_ops) {                          702         if (q->mq_ops) {
482                 hctx = blk_mq_map_queue(q, rq- !! 703                 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
483                 hwq = hctx->queue_num;            704                 hwq = hctx->queue_num;
484         }                                         705         }
485                                                   706 
486         return (hwq << BLK_MQ_UNIQUE_TAG_BITS)    707         return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
487                 (rq->tag & BLK_MQ_UNIQUE_TAG_M    708                 (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
488 }                                                 709 }
489 EXPORT_SYMBOL(blk_mq_unique_tag);                 710 EXPORT_SYMBOL(blk_mq_unique_tag);
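
The inverse mapping lives next to BLK_MQ_UNIQUE_TAG_BITS and BLK_MQ_UNIQUE_TAG_MASK in include/linux/blk-mq.h; reproduced here for reference:

    static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
    {
            return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
    }

    static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
    {
            return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
    }
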
                                                   >> 711 
                                                   >> 712 ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
                                                   >> 713 {
                                                   >> 714         char *orig_page = page;
                                                   >> 715         unsigned int free, res;
                                                   >> 716 
                                                   >> 717         if (!tags)
                                                   >> 718                 return 0;
                                                   >> 719 
                                                   >> 720         page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                                                   >> 721                         "bits_per_word=%u\n",
                                                   >> 722                         tags->nr_tags, tags->nr_reserved_tags,
                                                   >> 723                         tags->bitmap_tags.bits_per_word);
                                                   >> 724 
                                                   >> 725         free = bt_unused_tags(&tags->bitmap_tags);
                                                   >> 726         res = bt_unused_tags(&tags->breserved_tags);
                                                   >> 727 
                                                   >> 728         page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
                                                   >> 729         page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
                                                   >> 730 
                                                   >> 731         return page - orig_page;
                                                   >> 732 }
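
Read back through sysfs, the format strings above produce output along these lines (old variant only; the numbers are illustrative, for an idle map of 64 tags):

    nr_tags=64, reserved_tags=0, bits_per_word=4
    nr_free=64, nr_reserved=0
    active_queues=0
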
490                                                   733 
