
TOMOYO Linux Cross Reference
Linux/block/blk-mq-tag.c


Diff markup

Differences between /block/blk-mq-tag.c (Version linux-4.17.19) and /block/blk-mq-tag.c (Version linux-5.5.19)


                                                   >>   1 // SPDX-License-Identifier: GPL-2.0
  1 /*                                                  2 /*
  2  * Tag allocation using scalable bitmaps. Uses      3  * Tag allocation using scalable bitmaps. Uses active queue tracking to support
  3  * fairer distribution of tags between multipl      4  * fairer distribution of tags between multiple submitters when a shared tag map
  4  * is used.                                         5  * is used.
  5  *                                                  6  *
  6  * Copyright (C) 2013-2014 Jens Axboe               7  * Copyright (C) 2013-2014 Jens Axboe
  7  */                                                 8  */
  8 #include <linux/kernel.h>                           9 #include <linux/kernel.h>
  9 #include <linux/module.h>                          10 #include <linux/module.h>
 10                                                    11 
 11 #include <linux/blk-mq.h>                          12 #include <linux/blk-mq.h>
                                                   >>  13 #include <linux/delay.h>
 12 #include "blk.h"                                   14 #include "blk.h"
 13 #include "blk-mq.h"                                15 #include "blk-mq.h"
 14 #include "blk-mq-tag.h"                            16 #include "blk-mq-tag.h"
 15                                                    17 
 16 bool blk_mq_has_free_tags(struct blk_mq_tags * << 
 17 {                                              << 
 18         if (!tags)                             << 
 19                 return true;                   << 
 20                                                << 
 21         return sbitmap_any_bit_clear(&tags->bi << 
 22 }                                              << 
 23                                                << 
 24 /*                                                 18 /*
 25  * If a previously inactive queue goes active,     19  * If a previously inactive queue goes active, bump the active user count.
                                                   >>  20  * We need to do this before trying to allocate a driver tag, so that
                                                   >>  21  * even if the first allocation attempt fails, the other shared-tag
                                                   >>  22  * users can reserve budget for it.
 26  */                                                23  */
 27 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *h     24 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 28 {                                                  25 {
 29         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hc     26         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 30             !test_and_set_bit(BLK_MQ_S_TAG_ACT     27             !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 31                 atomic_inc(&hctx->tags->active     28                 atomic_inc(&hctx->tags->active_queues);
 32                                                    29 
 33         return true;                               30         return true;
 34 }                                                  31 }
 35                                                    32 
 36 /*                                                 33 /*
 37  * Wakeup all potentially sleeping on tags         34  * Wakeup all potentially sleeping on tags
 38  */                                                35  */
 39 void blk_mq_tag_wakeup_all(struct blk_mq_tags      36 void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 40 {                                                  37 {
 41         sbitmap_queue_wake_all(&tags->bitmap_t     38         sbitmap_queue_wake_all(&tags->bitmap_tags);
 42         if (include_reserve)                       39         if (include_reserve)
 43                 sbitmap_queue_wake_all(&tags->     40                 sbitmap_queue_wake_all(&tags->breserved_tags);
 44 }                                                  41 }
 45                                                    42 
 46 /*                                                 43 /*
 47  * If a previously busy queue goes inactive, p     44  * If a previously busy queue goes inactive, potential waiters could now
 48  * be allowed to queue. Wake them up and check     45  * be allowed to queue. Wake them up and check.
 49  */                                                46  */
 50 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *h     47 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 51 {                                                  48 {
 52         struct blk_mq_tags *tags = hctx->tags;     49         struct blk_mq_tags *tags = hctx->tags;
 53                                                    50 
 54         if (!test_and_clear_bit(BLK_MQ_S_TAG_A     51         if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 55                 return;                            52                 return;
 56                                                    53 
 57         atomic_dec(&tags->active_queues);          54         atomic_dec(&tags->active_queues);
 58                                                    55 
 59         blk_mq_tag_wakeup_all(tags, false);        56         blk_mq_tag_wakeup_all(tags, false);
 60 }                                                  57 }
 61                                                    58 
 62 /*                                                 59 /*
 63  * For shared tag users, we track the number o     60  * For shared tag users, we track the number of currently active users
 64  * and attempt to provide a fair share of the      61  * and attempt to provide a fair share of the tag depth for each of them.
 65  */                                                62  */
 66 static inline bool hctx_may_queue(struct blk_m     63 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 67                                   struct sbitm     64                                   struct sbitmap_queue *bt)
 68 {                                                  65 {
 69         unsigned int depth, users;                 66         unsigned int depth, users;
 70                                                    67 
 71         if (!hctx || !(hctx->flags & BLK_MQ_F_     68         if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
 72                 return true;                       69                 return true;
 73         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hc     70         if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 74                 return true;                       71                 return true;
 75                                                    72 
 76         /*                                         73         /*
 77          * Don't try dividing an ant               74          * Don't try dividing an ant
 78          */                                        75          */
 79         if (bt->sb.depth == 1)                     76         if (bt->sb.depth == 1)
 80                 return true;                       77                 return true;
 81                                                    78 
 82         users = atomic_read(&hctx->tags->activ     79         users = atomic_read(&hctx->tags->active_queues);
 83         if (!users)                                80         if (!users)
 84                 return true;                       81                 return true;
 85                                                    82 
 86         /*                                         83         /*
 87          * Allow at least some tags                84          * Allow at least some tags
 88          */                                        85          */
 89         depth = max((bt->sb.depth + users - 1)     86         depth = max((bt->sb.depth + users - 1) / users, 4U);
 90         return atomic_read(&hctx->nr_active) <     87         return atomic_read(&hctx->nr_active) < depth;
 91 }                                                  88 }
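
For illustration: the fair-share check in hctx_may_queue() above gives each
active queue roughly ceil(depth / users) tags, but never fewer than 4. A
minimal user-space sketch of the same arithmetic (not kernel code;
fair_share() is a hypothetical helper):

    #include <stdio.h>

    /* Same rule as hctx_may_queue(): ceil(depth / users), floored at 4. */
    static unsigned int fair_share(unsigned int depth, unsigned int users)
    {
            unsigned int d = (depth + users - 1) / users; /* ceiling division */

            return d > 4 ? d : 4;                         /* max(d, 4U) */
    }

    int main(void)
    {
            printf("%u\n", fair_share(256, 3)); /* 86: (256 + 2) / 3 */
            printf("%u\n", fair_share(8, 7));   /* 4: clamped to the minimum */
            return 0;
    }

A hardware queue may then only allocate while its nr_active count stays below
that share.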
 92                                                    89 
 93 static int __blk_mq_get_tag(struct blk_mq_allo     90 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 94                             struct sbitmap_que     91                             struct sbitmap_queue *bt)
 95 {                                                  92 {
 96         if (!(data->flags & BLK_MQ_REQ_INTERNA     93         if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
 97             !hctx_may_queue(data->hctx, bt))       94             !hctx_may_queue(data->hctx, bt))
 98                 return -1;                         95                 return -1;
 99         if (data->shallow_depth)                   96         if (data->shallow_depth)
100                 return __sbitmap_queue_get_sha     97                 return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
101         else                                       98         else
102                 return __sbitmap_queue_get(bt)     99                 return __sbitmap_queue_get(bt);
103 }                                                 100 }
104                                                   101 
105 unsigned int blk_mq_get_tag(struct blk_mq_allo    102 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
106 {                                                 103 {
107         struct blk_mq_tags *tags = blk_mq_tags    104         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
108         struct sbitmap_queue *bt;                 105         struct sbitmap_queue *bt;
109         struct sbq_wait_state *ws;                106         struct sbq_wait_state *ws;
110         DEFINE_WAIT(wait);                     !! 107         DEFINE_SBQ_WAIT(wait);
111         unsigned int tag_offset;                  108         unsigned int tag_offset;
112         bool drop_ctx;                         << 
113         int tag;                                  109         int tag;
114                                                   110 
115         if (data->flags & BLK_MQ_REQ_RESERVED)    111         if (data->flags & BLK_MQ_REQ_RESERVED) {
116                 if (unlikely(!tags->nr_reserve    112                 if (unlikely(!tags->nr_reserved_tags)) {
117                         WARN_ON_ONCE(1);          113                         WARN_ON_ONCE(1);
118                         return BLK_MQ_TAG_FAIL    114                         return BLK_MQ_TAG_FAIL;
119                 }                                 115                 }
120                 bt = &tags->breserved_tags;       116                 bt = &tags->breserved_tags;
121                 tag_offset = 0;                   117                 tag_offset = 0;
122         } else {                                  118         } else {
123                 bt = &tags->bitmap_tags;          119                 bt = &tags->bitmap_tags;
124                 tag_offset = tags->nr_reserved    120                 tag_offset = tags->nr_reserved_tags;
125         }                                         121         }
126                                                   122 
127         tag = __blk_mq_get_tag(data, bt);         123         tag = __blk_mq_get_tag(data, bt);
128         if (tag != -1)                            124         if (tag != -1)
129                 goto found_tag;                   125                 goto found_tag;
130                                                   126 
131         if (data->flags & BLK_MQ_REQ_NOWAIT)      127         if (data->flags & BLK_MQ_REQ_NOWAIT)
132                 return BLK_MQ_TAG_FAIL;           128                 return BLK_MQ_TAG_FAIL;
133                                                   129 
134         ws = bt_wait_ptr(bt, data->hctx);         130         ws = bt_wait_ptr(bt, data->hctx);
135         drop_ctx = data->ctx == NULL;          << 
136         do {                                      131         do {
                                                   >> 132                 struct sbitmap_queue *bt_prev;
                                                   >> 133 
137                 /*                                134                 /*
138                  * We're out of tags on this h    135                  * We're out of tags on this hardware queue, kick any
139                  * pending IO submits before g    136                  * pending IO submits before going to sleep waiting for
140                  * some to complete.              137                  * some to complete.
141                  */                               138                  */
142                 blk_mq_run_hw_queue(data->hctx    139                 blk_mq_run_hw_queue(data->hctx, false);
143                                                   140 
144                 /*                                141                 /*
145                  * Retry tag allocation after     142                  * Retry tag allocation after running the hardware queue,
146                  * as running the queue may al    143                  * as running the queue may also have found completions.
147                  */                               144                  */
148                 tag = __blk_mq_get_tag(data, b    145                 tag = __blk_mq_get_tag(data, bt);
149                 if (tag != -1)                    146                 if (tag != -1)
150                         break;                    147                         break;
151                                                   148 
152                 prepare_to_wait_exclusive(&ws- !! 149                 sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
153                                                << 
154                                                   150 
155                 tag = __blk_mq_get_tag(data, b    151                 tag = __blk_mq_get_tag(data, bt);
156                 if (tag != -1)                    152                 if (tag != -1)
157                         break;                    153                         break;
158                                                   154 
159                 if (data->ctx)                 !! 155                 bt_prev = bt;
160                         blk_mq_put_ctx(data->c << 
161                                                << 
162                 io_schedule();                    156                 io_schedule();
163                                                   157 
                                                   >> 158                 sbitmap_finish_wait(bt, ws, &wait);
                                                   >> 159 
164                 data->ctx = blk_mq_get_ctx(dat    160                 data->ctx = blk_mq_get_ctx(data->q);
165                 data->hctx = blk_mq_map_queue( !! 161                 data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                                   >> 162                                                 data->ctx);
166                 tags = blk_mq_tags_from_data(d    163                 tags = blk_mq_tags_from_data(data);
167                 if (data->flags & BLK_MQ_REQ_R    164                 if (data->flags & BLK_MQ_REQ_RESERVED)
168                         bt = &tags->breserved_    165                         bt = &tags->breserved_tags;
169                 else                              166                 else
170                         bt = &tags->bitmap_tag    167                         bt = &tags->bitmap_tags;
171                                                   168 
172                 finish_wait(&ws->wait, &wait); !! 169                 /*
                                                   >> 170                  * If destination hw queue is changed, fake wake up on
                                                   >> 171                  * previous queue for compensating the wake up miss, so
                                                   >> 172                  * other allocations on previous queue won't be starved.
                                                   >> 173                  */
                                                   >> 174                 if (bt != bt_prev)
                                                   >> 175                         sbitmap_queue_wake_up(bt_prev);
                                                   >> 176 
173                 ws = bt_wait_ptr(bt, data->hct    177                 ws = bt_wait_ptr(bt, data->hctx);
174         } while (1);                              178         } while (1);
175                                                   179 
176         if (drop_ctx && data->ctx)             !! 180         sbitmap_finish_wait(bt, ws, &wait);
177                 blk_mq_put_ctx(data->ctx);     << 
178                                                << 
179         finish_wait(&ws->wait, &wait);         << 
180                                                   181 
181 found_tag:                                        182 found_tag:
182         return tag + tag_offset;                  183         return tag + tag_offset;
183 }                                                 184 }
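
The linux-5.5.19 side above replaces the bare prepare_to_wait_exclusive() /
finish_wait() calls with the sbitmap wait helpers (DEFINE_SBQ_WAIT(),
sbitmap_prepare_to_wait(), sbitmap_finish_wait()), which additionally keep the
sbitmap's waiter accounting up to date. A minimal sketch of that pattern in
isolation, assuming a kernel context (get_bit_or_sleep() is a hypothetical
wrapper):

    #include <linux/sbitmap.h>
    #include <linux/sched.h>

    static int get_bit_or_sleep(struct sbitmap_queue *bt,
                                struct sbq_wait_state *ws)
    {
            DEFINE_SBQ_WAIT(wait);
            int nr;

            do {
                    nr = __sbitmap_queue_get(bt);
                    if (nr != -1)
                            break;

                    /* Queue ourselves, then retry once more to close the
                     * race with a bit freed before we were on the list. */
                    sbitmap_prepare_to_wait(bt, ws, &wait,
                                            TASK_UNINTERRUPTIBLE);
                    nr = __sbitmap_queue_get(bt);
                    if (nr != -1)
                            break;

                    io_schedule();
                    sbitmap_finish_wait(bt, ws, &wait);
            } while (1);

            sbitmap_finish_wait(bt, ws, &wait);
            return nr;
    }

blk_mq_get_tag() does the same, plus rerunning the hardware queue and
re-resolving hctx/tags after sleeping, since the task may wake up mapped to a
different hardware queue (hence the fake wakeup on bt_prev).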
184                                                   185 
185 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx    186 void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
186                     struct blk_mq_ctx *ctx, un    187                     struct blk_mq_ctx *ctx, unsigned int tag)
187 {                                                 188 {
188         if (!blk_mq_tag_is_reserved(tags, tag)    189         if (!blk_mq_tag_is_reserved(tags, tag)) {
189                 const int real_tag = tag - tag    190                 const int real_tag = tag - tags->nr_reserved_tags;
190                                                   191 
191                 BUG_ON(real_tag >= tags->nr_ta    192                 BUG_ON(real_tag >= tags->nr_tags);
192                 sbitmap_queue_clear(&tags->bit    193                 sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
193         } else {                                  194         } else {
194                 BUG_ON(tag >= tags->nr_reserve    195                 BUG_ON(tag >= tags->nr_reserved_tags);
195                 sbitmap_queue_clear(&tags->bre    196                 sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
196         }                                         197         }
197 }                                                 198 }
198                                                   199 
199 struct bt_iter_data {                             200 struct bt_iter_data {
200         struct blk_mq_hw_ctx *hctx;               201         struct blk_mq_hw_ctx *hctx;
201         busy_iter_fn *fn;                         202         busy_iter_fn *fn;
202         void *data;                               203         void *data;
203         bool reserved;                            204         bool reserved;
204 };                                                205 };
205                                                   206 
206 static bool bt_iter(struct sbitmap *bitmap, un    207 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
207 {                                                 208 {
208         struct bt_iter_data *iter_data = data;    209         struct bt_iter_data *iter_data = data;
209         struct blk_mq_hw_ctx *hctx = iter_data    210         struct blk_mq_hw_ctx *hctx = iter_data->hctx;
210         struct blk_mq_tags *tags = hctx->tags;    211         struct blk_mq_tags *tags = hctx->tags;
211         bool reserved = iter_data->reserved;      212         bool reserved = iter_data->reserved;
212         struct request *rq;                       213         struct request *rq;
213                                                   214 
214         if (!reserved)                            215         if (!reserved)
215                 bitnr += tags->nr_reserved_tag    216                 bitnr += tags->nr_reserved_tags;
216         rq = tags->rqs[bitnr];                    217         rq = tags->rqs[bitnr];
217                                                   218 
218         /*                                        219         /*
219          * We can hit rq == NULL here, because    220          * We can hit rq == NULL here, because the tagging functions
220          * test and set the bit before assinin !! 221          * test and set the bit before assigning ->rqs[].
221          */                                       222          */
222         if (rq && rq->q == hctx->queue)           223         if (rq && rq->q == hctx->queue)
223                 iter_data->fn(hctx, rq, iter_d !! 224                 return iter_data->fn(hctx, rq, iter_data->data, reserved);
224         return true;                              225         return true;
225 }                                                 226 }
226                                                   227 
                                                   >> 228 /**
                                                   >> 229  * bt_for_each - iterate over the requests associated with a hardware queue
                                                   >> 230  * @hctx:       Hardware queue to examine.
                                                   >> 231  * @bt:         sbitmap to examine. This is either the breserved_tags member
                                                   >> 232  *              or the bitmap_tags member of struct blk_mq_tags.
                                                   >> 233  * @fn:         Pointer to the function that will be called for each request
                                                   >> 234  *              associated with @hctx that has been assigned a driver tag.
                                                   >> 235  *              @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
                                                   >> 236  *              where rq is a pointer to a request. Return true to continue
                                                   >> 237  *              iterating tags, false to stop.
                                                   >> 238  * @data:       Will be passed as third argument to @fn.
                                                   >> 239  * @reserved:   Indicates whether @bt is the breserved_tags member or the
                                                   >> 240  *              bitmap_tags member of struct blk_mq_tags.
                                                   >> 241  */
227 static void bt_for_each(struct blk_mq_hw_ctx *    242 static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
228                         busy_iter_fn *fn, void    243                         busy_iter_fn *fn, void *data, bool reserved)
229 {                                                 244 {
230         struct bt_iter_data iter_data = {         245         struct bt_iter_data iter_data = {
231                 .hctx = hctx,                     246                 .hctx = hctx,
232                 .fn = fn,                         247                 .fn = fn,
233                 .data = data,                     248                 .data = data,
234                 .reserved = reserved,             249                 .reserved = reserved,
235         };                                        250         };
236                                                   251 
237         sbitmap_for_each_set(&bt->sb, bt_iter,    252         sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
238 }                                                 253 }
239                                                   254 
240 struct bt_tags_iter_data {                        255 struct bt_tags_iter_data {
241         struct blk_mq_tags *tags;                 256         struct blk_mq_tags *tags;
242         busy_tag_iter_fn *fn;                     257         busy_tag_iter_fn *fn;
243         void *data;                               258         void *data;
244         bool reserved;                            259         bool reserved;
245 };                                                260 };
246                                                   261 
247 static bool bt_tags_iter(struct sbitmap *bitma    262 static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
248 {                                                 263 {
249         struct bt_tags_iter_data *iter_data =     264         struct bt_tags_iter_data *iter_data = data;
250         struct blk_mq_tags *tags = iter_data->    265         struct blk_mq_tags *tags = iter_data->tags;
251         bool reserved = iter_data->reserved;      266         bool reserved = iter_data->reserved;
252         struct request *rq;                       267         struct request *rq;
253                                                   268 
254         if (!reserved)                            269         if (!reserved)
255                 bitnr += tags->nr_reserved_tag    270                 bitnr += tags->nr_reserved_tags;
256                                                   271 
257         /*                                        272         /*
258          * We can hit rq == NULL here, because    273          * We can hit rq == NULL here, because the tagging functions
 259          * test and set the bit before assigning   274          * test and set the bit before assigning ->rqs[].
260          */                                       275          */
261         rq = tags->rqs[bitnr];                    276         rq = tags->rqs[bitnr];
262         if (rq)                                !! 277         if (rq && blk_mq_request_started(rq))
263                 iter_data->fn(rq, iter_data->d !! 278                 return iter_data->fn(rq, iter_data->data, reserved);
264                                                   279 
265         return true;                              280         return true;
266 }                                                 281 }
267                                                   282 
                                                   >> 283 /**
                                                   >> 284  * bt_tags_for_each - iterate over the requests in a tag map
                                                   >> 285  * @tags:       Tag map to iterate over.
                                                   >> 286  * @bt:         sbitmap to examine. This is either the breserved_tags member
                                                   >> 287  *              or the bitmap_tags member of struct blk_mq_tags.
                                                   >> 288  * @fn:         Pointer to the function that will be called for each started
                                                   >> 289  *              request. @fn will be called as follows: @fn(rq, @data,
                                                   >> 290  *              @reserved) where rq is a pointer to a request. Return true
                                                   >> 291  *              to continue iterating tags, false to stop.
                                                   >> 292  * @data:       Will be passed as second argument to @fn.
                                                   >> 293  * @reserved:   Indicates whether @bt is the breserved_tags member or the
                                                   >> 294  *              bitmap_tags member of struct blk_mq_tags.
                                                   >> 295  */
268 static void bt_tags_for_each(struct blk_mq_tag    296 static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
269                              busy_tag_iter_fn     297                              busy_tag_iter_fn *fn, void *data, bool reserved)
270 {                                                 298 {
271         struct bt_tags_iter_data iter_data = {    299         struct bt_tags_iter_data iter_data = {
272                 .tags = tags,                     300                 .tags = tags,
273                 .fn = fn,                         301                 .fn = fn,
274                 .data = data,                     302                 .data = data,
275                 .reserved = reserved,             303                 .reserved = reserved,
276         };                                        304         };
277                                                   305 
278         if (tags->rqs)                            306         if (tags->rqs)
279                 sbitmap_for_each_set(&bt->sb,     307                 sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
280 }                                                 308 }
281                                                   309 
                                                   >> 310 /**
                                                   >> 311  * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
                                                   >> 312  * @tags:       Tag map to iterate over.
                                                   >> 313  * @fn:         Pointer to the function that will be called for each started
                                                   >> 314  *              request. @fn will be called as follows: @fn(rq, @priv,
                                                   >> 315  *              reserved) where rq is a pointer to a request. 'reserved'
                                                   >> 316  *              indicates whether or not @rq is a reserved request. Return
                                                   >> 317  *              true to continue iterating tags, false to stop.
                                                   >> 318  * @priv:       Will be passed as second argument to @fn.
                                                   >> 319  */
282 static void blk_mq_all_tag_busy_iter(struct bl    320 static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
283                 busy_tag_iter_fn *fn, void *pr    321                 busy_tag_iter_fn *fn, void *priv)
284 {                                                 322 {
285         if (tags->nr_reserved_tags)               323         if (tags->nr_reserved_tags)
286                 bt_tags_for_each(tags, &tags->    324                 bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
287         bt_tags_for_each(tags, &tags->bitmap_t    325         bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
288 }                                                 326 }
289                                                   327 
                                                   >> 328 /**
                                                   >> 329  * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
                                                   >> 330  * @tagset:     Tag set to iterate over.
                                                   >> 331  * @fn:         Pointer to the function that will be called for each started
                                                   >> 332  *              request. @fn will be called as follows: @fn(rq, @priv,
                                                   >> 333  *              reserved) where rq is a pointer to a request. 'reserved'
                                                   >> 334  *              indicates whether or not @rq is a reserved request. Return
                                                   >> 335  *              true to continue iterating tags, false to stop.
                                                   >> 336  * @priv:       Will be passed as second argument to @fn.
                                                   >> 337  */
290 void blk_mq_tagset_busy_iter(struct blk_mq_tag    338 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
291                 busy_tag_iter_fn *fn, void *pr    339                 busy_tag_iter_fn *fn, void *priv)
292 {                                                 340 {
293         int i;                                    341         int i;
294                                                   342 
295         for (i = 0; i < tagset->nr_hw_queues;     343         for (i = 0; i < tagset->nr_hw_queues; i++) {
296                 if (tagset->tags && tagset->ta    344                 if (tagset->tags && tagset->tags[i])
297                         blk_mq_all_tag_busy_it    345                         blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
298         }                                         346         }
299 }                                                 347 }
300 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);           348 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
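
A hedged example of how a driver might call blk_mq_tagset_busy_iter() with the
linux-5.5.19 callback signature, where the callback returns bool (true to keep
iterating) rather than 4.17's void (count_inflight() and my_driver_inflight()
are hypothetical names):

    #include <linux/blk-mq.h>

    static bool count_inflight(struct request *rq, void *data, bool reserved)
    {
            unsigned int *count = data;

            (*count)++;
            return true;    /* keep iterating over the tag set */
    }

    static unsigned int my_driver_inflight(struct blk_mq_tag_set *set)
    {
            unsigned int count = 0;

            blk_mq_tagset_busy_iter(set, count_inflight, &count);
            return count;
    }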
301                                                   349 
302 int blk_mq_tagset_iter(struct blk_mq_tag_set * !! 350 static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
303                          int (fn)(void *, stru !! 351                 void *data, bool reserved)
304 {                                                 352 {
305         int i, j, ret = 0;                     !! 353         unsigned *count = data;
306                                                << 
307         if (WARN_ON_ONCE(!fn))                 << 
308                 goto out;                      << 
309                                                   354 
310         for (i = 0; i < set->nr_hw_queues; i++ !! 355         if (blk_mq_request_completed(rq))
311                 struct blk_mq_tags *tags = set !! 356                 (*count)++;
                                                   >> 357         return true;
                                                   >> 358 }
312                                                   359 
313                 if (!tags)                     !! 360 /**
314                         continue;              !! 361  * blk_mq_tagset_wait_completed_request - wait until all completed req's
                                                   >>  362  * complete function is run
                                                   >> 363  * @tagset:     Tag set to drain completed request
                                                   >> 364  *
                                                   >>  365  * Note: This function has to be run after all IO queues are shut down
                                                   >> 366  */
                                                   >> 367 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
                                                   >> 368 {
                                                   >> 369         while (true) {
                                                   >> 370                 unsigned count = 0;
315                                                   371 
316                 for (j = 0; j < tags->nr_tags; !! 372                 blk_mq_tagset_busy_iter(tagset,
317                         if (!tags->static_rqs[ !! 373                                 blk_mq_tagset_count_completed_rqs, &count);
318                                 continue;      !! 374                 if (!count)
319                                                !! 375                         break;
320                         ret = fn(data, tags->s !! 376                 msleep(5);
321                         if (ret)               << 
322                                 goto out;      << 
323                 }                              << 
324         }                                         377         }
325                                                << 
326 out:                                           << 
327         return ret;                            << 
328 }                                                 378 }
329 EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);         !! 379 EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
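
blk_mq_tagset_wait_completed_request() is just the msleep(5) polling loop
above around blk_mq_tagset_count_completed_rqs(). A sketch of the teardown
ordering its comment prescribes (my_cancel_request() and my_driver_teardown()
are hypothetical driver code):

    static bool my_cancel_request(struct request *rq, void *data, bool reserved)
    {
            blk_mq_complete_request(rq);    /* force-complete in-flight I/O */
            return true;
    }

    static void my_driver_teardown(struct blk_mq_tag_set *set)
    {
            /* 1. Shut down the hardware queues (driver specific). */
            /* 2. Cancel anything still outstanding. */
            blk_mq_tagset_busy_iter(set, my_cancel_request, NULL);
            /* 3. Wait until every ->complete() has actually run. */
            blk_mq_tagset_wait_completed_request(set);
    }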
330                                                   380 
                                                   >> 381 /**
                                                   >> 382  * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
                                                   >> 383  * @q:          Request queue to examine.
                                                   >> 384  * @fn:         Pointer to the function that will be called for each request
                                                   >> 385  *              on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
                                                   >> 386  *              reserved) where rq is a pointer to a request and hctx points
                                                   >> 387  *              to the hardware queue associated with the request. 'reserved'
                                                   >> 388  *              indicates whether or not @rq is a reserved request.
                                                   >> 389  * @priv:       Will be passed as third argument to @fn.
                                                   >> 390  *
                                                   >> 391  * Note: if @q->tag_set is shared with other request queues then @fn will be
                                                   >> 392  * called for all requests on all queues that share that tag set and not only
                                                   >> 393  * for requests associated with @q.
                                                   >> 394  */
331 void blk_mq_queue_tag_busy_iter(struct request    395 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
332                 void *priv)                       396                 void *priv)
333 {                                                 397 {
334         struct blk_mq_hw_ctx *hctx;               398         struct blk_mq_hw_ctx *hctx;
335         int i;                                    399         int i;
336                                                   400 
                                                   >> 401         /*
                                                   >> 402          * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
                                                   >> 403          * while the queue is frozen. So we can use q_usage_counter to avoid
                                                   >> 404          * racing with it. __blk_mq_update_nr_hw_queues() uses
                                                   >> 405          * synchronize_rcu() to ensure this function left the critical section
                                                   >> 406          * below.
                                                   >> 407          */
                                                   >> 408         if (!percpu_ref_tryget(&q->q_usage_counter))
                                                   >> 409                 return;
337                                                   410 
338         queue_for_each_hw_ctx(q, hctx, i) {       411         queue_for_each_hw_ctx(q, hctx, i) {
339                 struct blk_mq_tags *tags = hct    412                 struct blk_mq_tags *tags = hctx->tags;
340                                                   413 
341                 /*                                414                 /*
342                  * If not software queues are  !! 415                  * If no software queues are currently mapped to this
343                  * hardware queue, there's not    416                  * hardware queue, there's nothing to check
344                  */                               417                  */
345                 if (!blk_mq_hw_queue_mapped(hc    418                 if (!blk_mq_hw_queue_mapped(hctx))
346                         continue;                 419                         continue;
347                                                   420 
348                 if (tags->nr_reserved_tags)       421                 if (tags->nr_reserved_tags)
349                         bt_for_each(hctx, &tag    422                         bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
350                 bt_for_each(hctx, &tags->bitma    423                 bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
351         }                                         424         }
352                                                !! 425         blk_queue_exit(q);
353 }                                                 426 }
354                                                   427 
355 static int bt_alloc(struct sbitmap_queue *bt,     428 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
356                     bool round_robin, int node    429                     bool round_robin, int node)
357 {                                                 430 {
358         return sbitmap_queue_init_node(bt, dep    431         return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
359                                        node);     432                                        node);
360 }                                                 433 }
361                                                   434 
362 static struct blk_mq_tags *blk_mq_init_bitmap_    435 static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
363                                                   436                                                    int node, int alloc_policy)
364 {                                                 437 {
365         unsigned int depth = tags->nr_tags - t    438         unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
366         bool round_robin = alloc_policy == BLK    439         bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
367                                                   440 
368         if (bt_alloc(&tags->bitmap_tags, depth    441         if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
369                 goto free_tags;                   442                 goto free_tags;
370         if (bt_alloc(&tags->breserved_tags, ta    443         if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
371                      node))                       444                      node))
372                 goto free_bitmap_tags;            445                 goto free_bitmap_tags;
373                                                   446 
374         return tags;                              447         return tags;
375 free_bitmap_tags:                                 448 free_bitmap_tags:
376         sbitmap_queue_free(&tags->bitmap_tags)    449         sbitmap_queue_free(&tags->bitmap_tags);
377 free_tags:                                        450 free_tags:
378         kfree(tags);                              451         kfree(tags);
379         return NULL;                              452         return NULL;
380 }                                                 453 }
381                                                   454 
382 struct blk_mq_tags *blk_mq_init_tags(unsigned     455 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
383                                      unsigned     456                                      unsigned int reserved_tags,
384                                      int node,    457                                      int node, int alloc_policy)
385 {                                                 458 {
386         struct blk_mq_tags *tags;                 459         struct blk_mq_tags *tags;
387                                                   460 
388         if (total_tags > BLK_MQ_TAG_MAX) {        461         if (total_tags > BLK_MQ_TAG_MAX) {
389                 pr_err("blk-mq: tag depth too     462                 pr_err("blk-mq: tag depth too large\n");
390                 return NULL;                      463                 return NULL;
391         }                                         464         }
392                                                   465 
393         tags = kzalloc_node(sizeof(*tags), GFP    466         tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
394         if (!tags)                                467         if (!tags)
395                 return NULL;                      468                 return NULL;
396                                                   469 
397         tags->nr_tags = total_tags;               470         tags->nr_tags = total_tags;
398         tags->nr_reserved_tags = reserved_tags    471         tags->nr_reserved_tags = reserved_tags;
399                                                   472 
400         return blk_mq_init_bitmap_tags(tags, n    473         return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
401 }                                                 474 }
402                                                   475 
403 void blk_mq_free_tags(struct blk_mq_tags *tags    476 void blk_mq_free_tags(struct blk_mq_tags *tags)
404 {                                                 477 {
405         sbitmap_queue_free(&tags->bitmap_tags)    478         sbitmap_queue_free(&tags->bitmap_tags);
406         sbitmap_queue_free(&tags->breserved_ta    479         sbitmap_queue_free(&tags->breserved_tags);
407         kfree(tags);                              480         kfree(tags);
408 }                                                 481 }
409                                                   482 
410 int blk_mq_tag_update_depth(struct blk_mq_hw_c    483 int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
411                             struct blk_mq_tags    484                             struct blk_mq_tags **tagsptr, unsigned int tdepth,
412                             bool can_grow)        485                             bool can_grow)
413 {                                                 486 {
414         struct blk_mq_tags *tags = *tagsptr;      487         struct blk_mq_tags *tags = *tagsptr;
415                                                   488 
416         if (tdepth <= tags->nr_reserved_tags)     489         if (tdepth <= tags->nr_reserved_tags)
417                 return -EINVAL;                   490                 return -EINVAL;
418                                                   491 
419         tdepth -= tags->nr_reserved_tags;      << 
420                                                << 
421         /*                                        492         /*
422          * If we are allowed to grow beyond th    493          * If we are allowed to grow beyond the original size, allocate
423          * a new set of tags before freeing th    494          * a new set of tags before freeing the old one.
424          */                                       495          */
425         if (tdepth > tags->nr_tags) {             496         if (tdepth > tags->nr_tags) {
426                 struct blk_mq_tag_set *set = h    497                 struct blk_mq_tag_set *set = hctx->queue->tag_set;
427                 struct blk_mq_tags *new;          498                 struct blk_mq_tags *new;
428                 bool ret;                         499                 bool ret;
429                                                   500 
430                 if (!can_grow)                    501                 if (!can_grow)
431                         return -EINVAL;           502                         return -EINVAL;
432                                                   503 
433                 /*                                504                 /*
434                  * We need some sort of upper     505                  * We need some sort of upper limit, set it high enough that
435                  * no valid use cases should r    506                  * no valid use cases should require more.
436                  */                               507                  */
437                 if (tdepth > 16 * BLKDEV_MAX_R    508                 if (tdepth > 16 * BLKDEV_MAX_RQ)
438                         return -EINVAL;           509                         return -EINVAL;
439                                                   510 
440                 new = blk_mq_alloc_rq_map(set, !! 511                 new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                                   >> 512                                 tags->nr_reserved_tags);
441                 if (!new)                         513                 if (!new)
442                         return -ENOMEM;           514                         return -ENOMEM;
443                 ret = blk_mq_alloc_rqs(set, ne    515                 ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
444                 if (ret) {                        516                 if (ret) {
445                         blk_mq_free_rq_map(new    517                         blk_mq_free_rq_map(new);
446                         return -ENOMEM;           518                         return -ENOMEM;
447                 }                                 519                 }
448                                                   520 
449                 blk_mq_free_rqs(set, *tagsptr,    521                 blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
450                 blk_mq_free_rq_map(*tagsptr);     522                 blk_mq_free_rq_map(*tagsptr);
451                 *tagsptr = new;                   523                 *tagsptr = new;
452         } else {                                  524         } else {
453                 /*                                525                 /*
454                  * Don't need (or can't) updat    526                  * Don't need (or can't) update reserved tags here, they
455                  * remain static and should ne    527                  * remain static and should never need resizing.
456                  */                               528                  */
457                 sbitmap_queue_resize(&tags->bi !! 529                 sbitmap_queue_resize(&tags->bitmap_tags,
                                                   >> 530                                 tdepth - tags->nr_reserved_tags);
458         }                                         531         }
459                                                   532 
460         return 0;                                 533         return 0;
461 }                                                 534 }
462                                                   535 
463 /**                                               536 /**
464  * blk_mq_unique_tag() - return a tag that is     537  * blk_mq_unique_tag() - return a tag that is unique queue-wide
465  * @rq: request for which to compute a unique     538  * @rq: request for which to compute a unique tag
466  *                                                539  *
467  * The tag field in struct request is unique p    540  * The tag field in struct request is unique per hardware queue but not over
468  * all hardware queues. Hence this function th    541  * all hardware queues. Hence this function that returns a tag with the
469  * hardware context index in the upper bits an    542  * hardware context index in the upper bits and the per hardware queue tag in
470  * the lower bits.                                543  * the lower bits.
471  *                                                544  *
472  * Note: When called for a request that is que    545  * Note: When called for a request that is queued on a non-multiqueue request
473  * queue, the hardware context index is set to    546  * queue, the hardware context index is set to zero.
474  */                                               547  */
475 u32 blk_mq_unique_tag(struct request *rq)         548 u32 blk_mq_unique_tag(struct request *rq)
476 {                                                 549 {
477         struct request_queue *q = rq->q;       !! 550         return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
478         struct blk_mq_hw_ctx *hctx;            << 
479         int hwq = 0;                           << 
480                                                << 
481         if (q->mq_ops) {                       << 
482                 hctx = blk_mq_map_queue(q, rq- << 
483                 hwq = hctx->queue_num;         << 
484         }                                      << 
485                                                << 
486         return (hwq << BLK_MQ_UNIQUE_TAG_BITS) << 
487                 (rq->tag & BLK_MQ_UNIQUE_TAG_M    551                 (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
488 }                                                 552 }
489 EXPORT_SYMBOL(blk_mq_unique_tag);                 553 EXPORT_SYMBOL(blk_mq_unique_tag);
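
The matching decode helpers live in <linux/blk-mq.h>; a short sketch
(my_show_tag() is hypothetical):

    #include <linux/blk-mq.h>
    #include <linux/printk.h>

    static void my_show_tag(struct request *rq)
    {
            u32 unique = blk_mq_unique_tag(rq);

            pr_info("hwq %u, per-queue tag %u\n",
                    blk_mq_unique_tag_to_hwq(unique),   /* upper bits */
                    blk_mq_unique_tag_to_tag(unique));  /* lower bits */
    }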
490                                                   554 

