
TOMOYO Linux Cross Reference
Linux/block/blk-mq.c


  1 /*
  2  * Block multiqueue core code
  3  *
  4  * Copyright (C) 2013-2014 Jens Axboe
  5  * Copyright (C) 2013-2014 Christoph Hellwig
  6  */
  7 #include <linux/kernel.h>
  8 #include <linux/module.h>
  9 #include <linux/backing-dev.h>
 10 #include <linux/bio.h>
 11 #include <linux/blkdev.h>
 12 #include <linux/mm.h>
 13 #include <linux/init.h>
 14 #include <linux/slab.h>
 15 #include <linux/workqueue.h>
 16 #include <linux/smp.h>
 17 #include <linux/llist.h>
 18 #include <linux/list_sort.h>
 19 #include <linux/cpu.h>
 20 #include <linux/cache.h>
 21 #include <linux/sched/sysctl.h>
 22 #include <linux/delay.h>
 23 #include <linux/crash_dump.h>
 24 
 25 #include <trace/events/block.h>
 26 
 27 #include <linux/blk-mq.h>
 28 #include "blk.h"
 29 #include "blk-mq.h"
 30 #include "blk-mq-tag.h"
 31 
 32 static DEFINE_MUTEX(all_q_mutex);
 33 static LIST_HEAD(all_q_list);
 34 
 35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 36 
 37 /*
 38  * Check if any of the ctx's have pending work in this hardware queue
 39  */
 40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 41 {
 42         unsigned int i;
 43 
 44         for (i = 0; i < hctx->ctx_map.size; i++)
 45                 if (hctx->ctx_map.map[i].word)
 46                         return true;
 47 
 48         return false;
 49 }
 50 
 51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
 52                                               struct blk_mq_ctx *ctx)
 53 {
 54         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
 55 }
 56 
 57 #define CTX_TO_BIT(hctx, ctx)   \
 58         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
 59 
 60 /*
 61  * Mark this ctx as having pending work in this hardware queue
 62  */
 63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 64                                      struct blk_mq_ctx *ctx)
 65 {
 66         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
 67 
 68         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
 69                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 70 }
 71 
 72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 73                                       struct blk_mq_ctx *ctx)
 74 {
 75         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
 76 
 77         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 78 }
 79 
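    /*
     * Grab a reference on the queue's usage counter before issuing a
     * request. If the queue is frozen and the caller cannot sleep
     * (!__GFP_WAIT), fail with -EBUSY; otherwise wait until the freeze
     * is lifted, giving up only if the queue is dying or the wait is
     * interrupted.
     */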
 80 static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 81 {
 82         while (true) {
 83                 int ret;
 84 
 85                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
 86                         return 0;
 87 
 88                 if (!(gfp & __GFP_WAIT))
 89                         return -EBUSY;
 90 
 91                 ret = wait_event_interruptible(q->mq_freeze_wq,
 92                                 !atomic_read(&q->mq_freeze_depth) ||
 93                                 blk_queue_dying(q));
 94                 if (blk_queue_dying(q))
 95                         return -ENODEV;
 96                 if (ret)
 97                         return ret;
 98         }
 99 }
100 
101 static void blk_mq_queue_exit(struct request_queue *q)
102 {
103         percpu_ref_put(&q->mq_usage_counter);
104 }
105 
106 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
107 {
108         struct request_queue *q =
109                 container_of(ref, struct request_queue, mq_usage_counter);
110 
111         wake_up_all(&q->mq_freeze_wq);
112 }
113 
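     /*
      * Start freezing the queue: the first caller kills the percpu usage
      * counter so new blk_mq_queue_enter() callers block, then runs the
      * hardware queues so already-queued requests get dispatched and
      * drained.
      */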
114 void blk_mq_freeze_queue_start(struct request_queue *q)
115 {
116         int freeze_depth;
117 
118         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
119         if (freeze_depth == 1) {
120                 percpu_ref_kill(&q->mq_usage_counter);
121                 blk_mq_run_hw_queues(q, false);
122         }
123 }
124 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
125 
126 static void blk_mq_freeze_queue_wait(struct request_queue *q)
127 {
128         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
129 }
130 
131 /*
132  * Guarantee no request is in use, so we can change any data structure of
133  * the queue afterward.
134  */
135 void blk_mq_freeze_queue(struct request_queue *q)
136 {
137         blk_mq_freeze_queue_start(q);
138         blk_mq_freeze_queue_wait(q);
139 }
140 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
141 
142 void blk_mq_unfreeze_queue(struct request_queue *q)
143 {
144         int freeze_depth;
145 
146         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
147         WARN_ON_ONCE(freeze_depth < 0);
148         if (!freeze_depth) {
149                 percpu_ref_reinit(&q->mq_usage_counter);
150                 wake_up_all(&q->mq_freeze_wq);
151         }
152 }
153 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
154 
155 void blk_mq_wake_waiters(struct request_queue *q)
156 {
157         struct blk_mq_hw_ctx *hctx;
158         unsigned int i;
159 
160         queue_for_each_hw_ctx(q, hctx, i)
161                 if (blk_mq_hw_queue_mapped(hctx))
162                         blk_mq_tag_wakeup_all(hctx->tags, true);
163 
164         /*
165          * If we are called because the queue has now been marked as
166          * dying, we need to ensure that processes currently waiting on
167          * the queue are notified as well.
168          */
169         wake_up_all(&q->mq_freeze_wq);
170 }
171 
172 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
173 {
174         return blk_mq_has_free_tags(hctx->tags);
175 }
176 EXPORT_SYMBOL(blk_mq_can_queue);
177 
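     /*
      * Initialise a freshly allocated request for the given software
      * queue. Everything except the tag and the atomic flags is reset
      * here.
      */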
178 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
179                                struct request *rq, unsigned int rw_flags)
180 {
181         if (blk_queue_io_stat(q))
182                 rw_flags |= REQ_IO_STAT;
183 
184         INIT_LIST_HEAD(&rq->queuelist);
185         /* csd/requeue_work/fifo_time is initialized before use */
186         rq->q = q;
187         rq->mq_ctx = ctx;
188         rq->cmd_flags |= rw_flags;
189         /* do not touch atomic flags, it needs atomic ops against the timer */
190         rq->cpu = -1;
191         INIT_HLIST_NODE(&rq->hash);
192         RB_CLEAR_NODE(&rq->rb_node);
193         rq->rq_disk = NULL;
194         rq->part = NULL;
195         rq->start_time = jiffies;
196 #ifdef CONFIG_BLK_CGROUP
197         rq->rl = NULL;
198         set_start_time_ns(rq);
199         rq->io_start_time_ns = 0;
200 #endif
201         rq->nr_phys_segments = 0;
202 #if defined(CONFIG_BLK_DEV_INTEGRITY)
203         rq->nr_integrity_segments = 0;
204 #endif
205         rq->special = NULL;
206         /* tag was already set */
207         rq->errors = 0;
208 
209         rq->cmd = rq->__cmd;
210 
211         rq->extra_len = 0;
212         rq->sense_len = 0;
213         rq->resid_len = 0;
214         rq->sense = NULL;
215 
216         INIT_LIST_HEAD(&rq->timeout_list);
217         rq->timeout = 0;
218 
219         rq->end_io = NULL;
220         rq->end_io_data = NULL;
221         rq->next_rq = NULL;
222 
223         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
224 }
225 
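     /*
      * Try to get a tag from the hardware context. On success, return the
      * preallocated request belonging to that tag, initialised for this
      * ctx; if the tag space is exhausted, return NULL.
      */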
226 static struct request *
227 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
228 {
229         struct request *rq;
230         unsigned int tag;
231 
232         tag = blk_mq_get_tag(data);
233         if (tag != BLK_MQ_TAG_FAIL) {
234                 rq = data->hctx->tags->rqs[tag];
235 
236                 if (blk_mq_tag_busy(data->hctx)) {
237                         rq->cmd_flags = REQ_MQ_INFLIGHT;
238                         atomic_inc(&data->hctx->nr_active);
239                 }
240 
241                 rq->tag = tag;
242                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
243                 return rq;
244         }
245 
246         return NULL;
247 }
248 
249 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
250                 bool reserved)
251 {
252         struct blk_mq_ctx *ctx;
253         struct blk_mq_hw_ctx *hctx;
254         struct request *rq;
255         struct blk_mq_alloc_data alloc_data;
256         int ret;
257 
258         ret = blk_mq_queue_enter(q, gfp);
259         if (ret)
260                 return ERR_PTR(ret);
261 
262         ctx = blk_mq_get_ctx(q);
263         hctx = q->mq_ops->map_queue(q, ctx->cpu);
264         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
265                         reserved, ctx, hctx);
266 
267         rq = __blk_mq_alloc_request(&alloc_data, rw);
268         if (!rq && (gfp & __GFP_WAIT)) {
269                 __blk_mq_run_hw_queue(hctx);
270                 blk_mq_put_ctx(ctx);
271 
272                 ctx = blk_mq_get_ctx(q);
273                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
274                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
275                                 hctx);
276                 rq =  __blk_mq_alloc_request(&alloc_data, rw);
277                 ctx = alloc_data.ctx;
278         }
279         blk_mq_put_ctx(ctx);
280         if (!rq) {
281                 blk_mq_queue_exit(q);
282                 return ERR_PTR(-EWOULDBLOCK);
283         }
284         return rq;
285 }
286 EXPORT_SYMBOL(blk_mq_alloc_request);
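     /*
      * Example usage from a driver (illustrative only; command setup and
      * error handling are driver specific):
      *
      *     rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);
      *     if (IS_ERR(rq))
      *             return PTR_ERR(rq);
      *     ...
      *     blk_mq_free_request(rq);
      */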
287 
288 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
289                                   struct blk_mq_ctx *ctx, struct request *rq)
290 {
291         const int tag = rq->tag;
292         struct request_queue *q = rq->q;
293 
294         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
295                 atomic_dec(&hctx->nr_active);
296         rq->cmd_flags = 0;
297 
298         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
299         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
300         blk_mq_queue_exit(q);
301 }
302 
303 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
304 {
305         struct blk_mq_ctx *ctx = rq->mq_ctx;
306 
307         ctx->rq_completed[rq_is_sync(rq)]++;
308         __blk_mq_free_request(hctx, ctx, rq);
309 
310 }
311 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
312 
313 void blk_mq_free_request(struct request *rq)
314 {
315         struct blk_mq_hw_ctx *hctx;
316         struct request_queue *q = rq->q;
317 
318         hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
319         blk_mq_free_hctx_request(hctx, rq);
320 }
321 EXPORT_SYMBOL_GPL(blk_mq_free_request);
322 
323 inline void __blk_mq_end_request(struct request *rq, int error)
324 {
325         blk_account_io_done(rq);
326 
327         if (rq->end_io) {
328                 rq->end_io(rq, error);
329         } else {
330                 if (unlikely(blk_bidi_rq(rq)))
331                         blk_mq_free_request(rq->next_rq);
332                 blk_mq_free_request(rq);
333         }
334 }
335 EXPORT_SYMBOL(__blk_mq_end_request);
336 
337 void blk_mq_end_request(struct request *rq, int error)
338 {
339         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
340                 BUG();
341         __blk_mq_end_request(rq, error);
342 }
343 EXPORT_SYMBOL(blk_mq_end_request);
344 
345 static void __blk_mq_complete_request_remote(void *data)
346 {
347         struct request *rq = data;
348 
349         rq->q->softirq_done_fn(rq);
350 }
351 
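     /*
      * Complete the request on the CPU that submitted it. If per-CPU
      * completions aren't requested (QUEUE_FLAG_SAME_COMP clear), or the
      * submitting CPU is the current one, shares a cache with it (and
      * SAME_FORCE isn't set), or is offline, run the softirq done handler
      * locally; otherwise send an IPI to the submitting CPU.
      */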
352 static void blk_mq_ipi_complete_request(struct request *rq)
353 {
354         struct blk_mq_ctx *ctx = rq->mq_ctx;
355         bool shared = false;
356         int cpu;
357 
358         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
359                 rq->q->softirq_done_fn(rq);
360                 return;
361         }
362 
363         cpu = get_cpu();
364         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
365                 shared = cpus_share_cache(cpu, ctx->cpu);
366 
367         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
368                 rq->csd.func = __blk_mq_complete_request_remote;
369                 rq->csd.info = rq;
370                 rq->csd.flags = 0;
371                 smp_call_function_single_async(ctx->cpu, &rq->csd);
372         } else {
373                 rq->q->softirq_done_fn(rq);
374         }
375         put_cpu();
376 }
377 
378 void __blk_mq_complete_request(struct request *rq)
379 {
380         struct request_queue *q = rq->q;
381 
382         if (!q->softirq_done_fn)
383                 blk_mq_end_request(rq, rq->errors);
384         else
385                 blk_mq_ipi_complete_request(rq);
386 }
387 
388 /**
389  * blk_mq_complete_request - end I/O on a request
390  * @rq:         the request being processed
391  *
392  * Description:
393  *      Ends all I/O on a request. It does not handle partial completions.
 394  *      The actual completion happens out-of-order, through an IPI handler.
395  **/
396 void blk_mq_complete_request(struct request *rq)
397 {
398         struct request_queue *q = rq->q;
399 
400         if (unlikely(blk_should_fake_timeout(q)))
401                 return;
402         if (!blk_mark_rq_complete(rq))
403                 __blk_mq_complete_request(rq);
404 }
405 EXPORT_SYMBOL(blk_mq_complete_request);
406 
407 int blk_mq_request_started(struct request *rq)
408 {
409         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
410 }
411 EXPORT_SYMBOL_GPL(blk_mq_request_started);
412 
413 void blk_mq_start_request(struct request *rq)
414 {
415         struct request_queue *q = rq->q;
416 
417         trace_block_rq_issue(q, rq);
418 
419         rq->resid_len = blk_rq_bytes(rq);
420         if (unlikely(blk_bidi_rq(rq)))
421                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
422 
423         blk_add_timer(rq);
424 
425         /*
 426          * Ensure that ->deadline is visible before we set the started
427          * flag and clear the completed flag.
428          */
429         smp_mb__before_atomic();
430 
431         /*
432          * Mark us as started and clear complete. Complete might have been
433          * set if requeue raced with timeout, which then marked it as
434          * complete. So be sure to clear complete again when we start
435          * the request, otherwise we'll ignore the completion event.
436          */
437         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
438                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
439         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
440                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
441 
442         if (q->dma_drain_size && blk_rq_bytes(rq)) {
443                 /*
444                  * Make sure space for the drain appears.  We know we can do
445                  * this because max_hw_segments has been adjusted to be one
446                  * fewer than the device can handle.
447                  */
448                 rq->nr_phys_segments++;
449         }
450 }
451 EXPORT_SYMBOL(blk_mq_start_request);
452 
453 static void __blk_mq_requeue_request(struct request *rq)
454 {
455         struct request_queue *q = rq->q;
456 
457         trace_block_rq_requeue(q, rq);
458 
459         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
460                 if (q->dma_drain_size && blk_rq_bytes(rq))
461                         rq->nr_phys_segments--;
462         }
463 }
464 
465 void blk_mq_requeue_request(struct request *rq)
466 {
467         __blk_mq_requeue_request(rq);
468 
469         BUG_ON(blk_queued_rq(rq));
470         blk_mq_add_to_requeue_list(rq, true);
471 }
472 EXPORT_SYMBOL(blk_mq_requeue_request);
473 
474 static void blk_mq_requeue_work(struct work_struct *work)
475 {
476         struct request_queue *q =
477                 container_of(work, struct request_queue, requeue_work);
478         LIST_HEAD(rq_list);
479         struct request *rq, *next;
480         unsigned long flags;
481 
482         spin_lock_irqsave(&q->requeue_lock, flags);
483         list_splice_init(&q->requeue_list, &rq_list);
484         spin_unlock_irqrestore(&q->requeue_lock, flags);
485 
486         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
487                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
488                         continue;
489 
490                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
491                 list_del_init(&rq->queuelist);
492                 blk_mq_insert_request(rq, true, false, false);
493         }
494 
495         while (!list_empty(&rq_list)) {
496                 rq = list_entry(rq_list.next, struct request, queuelist);
497                 list_del_init(&rq->queuelist);
498                 blk_mq_insert_request(rq, false, false, false);
499         }
500 
501         /*
502          * Use the start variant of queue running here, so that running
503          * the requeue work will kick stopped queues.
504          */
505         blk_mq_start_hw_queues(q);
506 }
507 
508 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
509 {
510         struct request_queue *q = rq->q;
511         unsigned long flags;
512 
513         /*
514          * We abuse this flag that is otherwise used by the I/O scheduler to
 515          * request head insertion from the workqueue.
516          */
517         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
518 
519         spin_lock_irqsave(&q->requeue_lock, flags);
520         if (at_head) {
521                 rq->cmd_flags |= REQ_SOFTBARRIER;
522                 list_add(&rq->queuelist, &q->requeue_list);
523         } else {
524                 list_add_tail(&rq->queuelist, &q->requeue_list);
525         }
526         spin_unlock_irqrestore(&q->requeue_lock, flags);
527 }
528 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
529 
530 void blk_mq_cancel_requeue_work(struct request_queue *q)
531 {
532         cancel_work_sync(&q->requeue_work);
533 }
534 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
535 
536 void blk_mq_kick_requeue_list(struct request_queue *q)
537 {
538         kblockd_schedule_work(&q->requeue_work);
539 }
540 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
541 
542 void blk_mq_abort_requeue_list(struct request_queue *q)
543 {
544         unsigned long flags;
545         LIST_HEAD(rq_list);
546 
547         spin_lock_irqsave(&q->requeue_lock, flags);
548         list_splice_init(&q->requeue_list, &rq_list);
549         spin_unlock_irqrestore(&q->requeue_lock, flags);
550 
551         while (!list_empty(&rq_list)) {
552                 struct request *rq;
553 
554                 rq = list_first_entry(&rq_list, struct request, queuelist);
555                 list_del_init(&rq->queuelist);
556                 rq->errors = -EIO;
557                 blk_mq_end_request(rq, rq->errors);
558         }
559 }
560 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
561 
562 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
563 {
564         return tags->rqs[tag];
565 }
566 EXPORT_SYMBOL(blk_mq_tag_to_rq);
567 
568 struct blk_mq_timeout_data {
569         unsigned long next;
570         unsigned int next_set;
571 };
572 
573 void blk_mq_rq_timed_out(struct request *req, bool reserved)
574 {
575         struct blk_mq_ops *ops = req->q->mq_ops;
576         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
577 
578         /*
579          * We know that complete is set at this point. If STARTED isn't set
580          * anymore, then the request isn't active and the "timeout" should
581          * just be ignored. This can happen due to the bitflag ordering.
582          * Timeout first checks if STARTED is set, and if it is, assumes
583          * the request is active. But if we race with completion, then
 584          * both flags will get cleared. So check here again, and ignore
585          * a timeout event with a request that isn't active.
586          */
587         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
588                 return;
589 
590         if (ops->timeout)
591                 ret = ops->timeout(req, reserved);
592 
593         switch (ret) {
594         case BLK_EH_HANDLED:
595                 __blk_mq_complete_request(req);
596                 break;
597         case BLK_EH_RESET_TIMER:
598                 blk_add_timer(req);
599                 blk_clear_rq_complete(req);
600                 break;
601         case BLK_EH_NOT_HANDLED:
602                 break;
603         default:
604                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
605                 break;
606         }
607 }
608 
609 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
610                 struct request *rq, void *priv, bool reserved)
611 {
612         struct blk_mq_timeout_data *data = priv;
613 
614         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
615                 /*
616                  * If a request wasn't started before the queue was
617                  * marked dying, kill it here or it'll go unnoticed.
618                  */
619                 if (unlikely(blk_queue_dying(rq->q))) {
620                         rq->errors = -EIO;
621                         blk_mq_complete_request(rq);
622                 }
623                 return;
624         }
625         if (rq->cmd_flags & REQ_NO_TIMEOUT)
626                 return;
627 
628         if (time_after_eq(jiffies, rq->deadline)) {
629                 if (!blk_mark_rq_complete(rq))
630                         blk_mq_rq_timed_out(rq, reserved);
631         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
632                 data->next = rq->deadline;
633                 data->next_set = 1;
634         }
635 }
636 
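     /*
      * Per-queue timeout timer. Walk every mapped hardware queue and
      * check its busy tags for expired requests; if any request still has
      * a pending deadline, re-arm the timer for the earliest one,
      * otherwise mark the mapped hardware queues as tag-idle.
      */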
637 static void blk_mq_rq_timer(unsigned long priv)
638 {
639         struct request_queue *q = (struct request_queue *)priv;
640         struct blk_mq_timeout_data data = {
641                 .next           = 0,
642                 .next_set       = 0,
643         };
644         struct blk_mq_hw_ctx *hctx;
645         int i;
646 
647         queue_for_each_hw_ctx(q, hctx, i) {
648                 /*
 649                  * If no software queues are currently mapped to this
650                  * hardware queue, there's nothing to check
651                  */
652                 if (!blk_mq_hw_queue_mapped(hctx))
653                         continue;
654 
655                 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
656         }
657 
658         if (data.next_set) {
659                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
660                 mod_timer(&q->timeout, data.next);
661         } else {
662                 queue_for_each_hw_ctx(q, hctx, i) {
663                         /* the hctx may be unmapped, so check it here */
664                         if (blk_mq_hw_queue_mapped(hctx))
665                                 blk_mq_tag_idle(hctx);
666                 }
667         }
668 }
669 
670 /*
671  * Reverse check our software queue for entries that we could potentially
672  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
673  * too much time checking for merges.
674  */
675 static bool blk_mq_attempt_merge(struct request_queue *q,
676                                  struct blk_mq_ctx *ctx, struct bio *bio)
677 {
678         struct request *rq;
679         int checked = 8;
680 
681         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
682                 int el_ret;
683 
684                 if (!checked--)
685                         break;
686 
687                 if (!blk_rq_merge_ok(rq, bio))
688                         continue;
689 
690                 el_ret = blk_try_merge(rq, bio);
691                 if (el_ret == ELEVATOR_BACK_MERGE) {
692                         if (bio_attempt_back_merge(q, rq, bio)) {
693                                 ctx->rq_merged++;
694                                 return true;
695                         }
696                         break;
697                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
698                         if (bio_attempt_front_merge(q, rq, bio)) {
699                                 ctx->rq_merged++;
700                                 return true;
701                         }
702                         break;
703                 }
704         }
705 
706         return false;
707 }
708 
709 /*
 710  * Process software queues that have been marked busy, splicing their
 711  * requests onto the passed-in dispatch list.
712  */
713 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
714 {
715         struct blk_mq_ctx *ctx;
716         int i;
717 
718         for (i = 0; i < hctx->ctx_map.size; i++) {
719                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
720                 unsigned int off, bit;
721 
722                 if (!bm->word)
723                         continue;
724 
725                 bit = 0;
726                 off = i * hctx->ctx_map.bits_per_word;
727                 do {
728                         bit = find_next_bit(&bm->word, bm->depth, bit);
729                         if (bit >= bm->depth)
730                                 break;
731 
732                         ctx = hctx->ctxs[bit + off];
733                         clear_bit(bit, &bm->word);
734                         spin_lock(&ctx->lock);
735                         list_splice_tail_init(&ctx->rq_list, list);
736                         spin_unlock(&ctx->lock);
737 
738                         bit++;
739                 } while (1);
740         }
741 }
742 
743 /*
744  * Run this hardware queue, pulling any software queues mapped to it in.
745  * Note that this function currently has various problems around ordering
746  * of IO. In particular, we'd like FIFO behaviour on handling existing
747  * items on the hctx->dispatch list. Ignore that for now.
748  */
749 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
750 {
751         struct request_queue *q = hctx->queue;
752         struct request *rq;
753         LIST_HEAD(rq_list);
754         LIST_HEAD(driver_list);
755         struct list_head *dptr;
756         int queued;
757 
758         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
759 
760         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
761                 return;
762 
763         hctx->run++;
764 
765         /*
766          * Touch any software queue that has pending entries.
767          */
768         flush_busy_ctxs(hctx, &rq_list);
769 
770         /*
771          * If we have previous entries on our dispatch list, grab them
772          * and stuff them at the front for more fair dispatch.
773          */
774         if (!list_empty_careful(&hctx->dispatch)) {
775                 spin_lock(&hctx->lock);
776                 if (!list_empty(&hctx->dispatch))
777                         list_splice_init(&hctx->dispatch, &rq_list);
778                 spin_unlock(&hctx->lock);
779         }
780 
781         /*
782          * Start off with dptr being NULL, so we start the first request
783          * immediately, even if we have more pending.
784          */
785         dptr = NULL;
786 
787         /*
788          * Now process all the entries, sending them to the driver.
789          */
790         queued = 0;
791         while (!list_empty(&rq_list)) {
792                 struct blk_mq_queue_data bd;
793                 int ret;
794 
795                 rq = list_first_entry(&rq_list, struct request, queuelist);
796                 list_del_init(&rq->queuelist);
797 
798                 bd.rq = rq;
799                 bd.list = dptr;
800                 bd.last = list_empty(&rq_list);
801 
802                 ret = q->mq_ops->queue_rq(hctx, &bd);
803                 switch (ret) {
804                 case BLK_MQ_RQ_QUEUE_OK:
805                         queued++;
806                         continue;
807                 case BLK_MQ_RQ_QUEUE_BUSY:
808                         list_add(&rq->queuelist, &rq_list);
809                         __blk_mq_requeue_request(rq);
810                         break;
811                 default:
812                         pr_err("blk-mq: bad return on queue: %d\n", ret);
813                 case BLK_MQ_RQ_QUEUE_ERROR:
814                         rq->errors = -EIO;
815                         blk_mq_end_request(rq, rq->errors);
816                         break;
817                 }
818 
819                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
820                         break;
821 
822                 /*
823                  * We've done the first request. If we have more than 1
824                  * left in the list, set dptr to defer issue.
825                  */
826                 if (!dptr && rq_list.next != rq_list.prev)
827                         dptr = &driver_list;
828         }
829 
830         if (!queued)
831                 hctx->dispatched[0]++;
832         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
833                 hctx->dispatched[ilog2(queued) + 1]++;
834 
835         /*
 836          * Any items that need requeuing? Stuff them into hctx->dispatch;
 837          * that is where we will continue on the next queue run.
838          */
839         if (!list_empty(&rq_list)) {
840                 spin_lock(&hctx->lock);
841                 list_splice(&rq_list, &hctx->dispatch);
842                 spin_unlock(&hctx->lock);
843                 /*
 844                  * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
 845                  * but it's possible the queue is stopped and restarted again
 846                  * before this. Queue restart will dispatch requests. And since
847                  * requests in rq_list aren't added into hctx->dispatch yet,
848                  * the requests in rq_list might get lost.
849                  *
850                  * blk_mq_run_hw_queue() already checks the STOPPED bit
851                  **/
852                 blk_mq_run_hw_queue(hctx, true);
853         }
854 }
855 
856 /*
857  * It'd be great if the workqueue API had a way to pass
858  * in a mask and had some smarts for more clever placement.
859  * For now we just round-robin here, switching for every
860  * BLK_MQ_CPU_WORK_BATCH queued items.
861  */
862 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
863 {
864         if (hctx->queue->nr_hw_queues == 1)
865                 return WORK_CPU_UNBOUND;
866 
867         if (--hctx->next_cpu_batch <= 0) {
868                 int cpu = hctx->next_cpu, next_cpu;
869 
870                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
871                 if (next_cpu >= nr_cpu_ids)
872                         next_cpu = cpumask_first(hctx->cpumask);
873 
874                 hctx->next_cpu = next_cpu;
875                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
876 
877                 return cpu;
878         }
879 
880         return hctx->next_cpu;
881 }
882 
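     /*
      * Run a hardware queue. If async isn't requested and we are already
      * on a CPU mapped to this hctx, dispatch inline; otherwise punt the
      * work to kblockd on one of the hctx's CPUs. Stopped or unmapped
      * queues are left alone.
      */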
883 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
884 {
885         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
886             !blk_mq_hw_queue_mapped(hctx)))
887                 return;
888 
889         if (!async) {
890                 int cpu = get_cpu();
891                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
892                         __blk_mq_run_hw_queue(hctx);
893                         put_cpu();
894                         return;
895                 }
896 
897                 put_cpu();
898         }
899 
900         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
901                         &hctx->run_work, 0);
902 }
903 
904 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
905 {
906         struct blk_mq_hw_ctx *hctx;
907         int i;
908 
909         queue_for_each_hw_ctx(q, hctx, i) {
910                 if ((!blk_mq_hctx_has_pending(hctx) &&
911                     list_empty_careful(&hctx->dispatch)) ||
912                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
913                         continue;
914 
915                 blk_mq_run_hw_queue(hctx, async);
916         }
917 }
918 EXPORT_SYMBOL(blk_mq_run_hw_queues);
919 
920 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
921 {
922         cancel_delayed_work(&hctx->run_work);
923         cancel_delayed_work(&hctx->delay_work);
924         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
925 }
926 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
927 
928 void blk_mq_stop_hw_queues(struct request_queue *q)
929 {
930         struct blk_mq_hw_ctx *hctx;
931         int i;
932 
933         queue_for_each_hw_ctx(q, hctx, i)
934                 blk_mq_stop_hw_queue(hctx);
935 }
936 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
937 
938 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
939 {
940         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
941 
942         blk_mq_run_hw_queue(hctx, false);
943 }
944 EXPORT_SYMBOL(blk_mq_start_hw_queue);
945 
946 void blk_mq_start_hw_queues(struct request_queue *q)
947 {
948         struct blk_mq_hw_ctx *hctx;
949         int i;
950 
951         queue_for_each_hw_ctx(q, hctx, i)
952                 blk_mq_start_hw_queue(hctx);
953 }
954 EXPORT_SYMBOL(blk_mq_start_hw_queues);
955 
956 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
957 {
958         struct blk_mq_hw_ctx *hctx;
959         int i;
960 
961         queue_for_each_hw_ctx(q, hctx, i) {
962                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
963                         continue;
964 
965                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
966                 blk_mq_run_hw_queue(hctx, async);
967         }
968 }
969 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
970 
971 static void blk_mq_run_work_fn(struct work_struct *work)
972 {
973         struct blk_mq_hw_ctx *hctx;
974 
975         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
976 
977         __blk_mq_run_hw_queue(hctx);
978 }
979 
980 static void blk_mq_delay_work_fn(struct work_struct *work)
981 {
982         struct blk_mq_hw_ctx *hctx;
983 
984         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
985 
986         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
987                 __blk_mq_run_hw_queue(hctx);
988 }
989 
990 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
991 {
992         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
993                 return;
994 
995         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
996                         &hctx->delay_work, msecs_to_jiffies(msecs));
997 }
998 EXPORT_SYMBOL(blk_mq_delay_queue);
999 
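     /*
      * Queue the request on its software queue and mark that ctx as
      * having pending work in the hardware queue's ctx bitmap.
      */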
1000 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1001                                     struct request *rq, bool at_head)
1002 {
1003         struct blk_mq_ctx *ctx = rq->mq_ctx;
1004 
1005         trace_block_rq_insert(hctx->queue, rq);
1006 
1007         if (at_head)
1008                 list_add(&rq->queuelist, &ctx->rq_list);
1009         else
1010                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1011 
1012         blk_mq_hctx_mark_pending(hctx, ctx);
1013 }
1014 
1015 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1016                 bool async)
1017 {
1018         struct request_queue *q = rq->q;
1019         struct blk_mq_hw_ctx *hctx;
1020         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1021 
1022         current_ctx = blk_mq_get_ctx(q);
1023         if (!cpu_online(ctx->cpu))
1024                 rq->mq_ctx = ctx = current_ctx;
1025 
1026         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1027 
1028         spin_lock(&ctx->lock);
1029         __blk_mq_insert_request(hctx, rq, at_head);
1030         spin_unlock(&ctx->lock);
1031 
1032         if (run_queue)
1033                 blk_mq_run_hw_queue(hctx, async);
1034 
1035         blk_mq_put_ctx(current_ctx);
1036 }
1037 
1038 static void blk_mq_insert_requests(struct request_queue *q,
1039                                      struct blk_mq_ctx *ctx,
1040                                      struct list_head *list,
1041                                      int depth,
1042                                      bool from_schedule)
1043 
1044 {
1045         struct blk_mq_hw_ctx *hctx;
1046         struct blk_mq_ctx *current_ctx;
1047 
1048         trace_block_unplug(q, depth, !from_schedule);
1049 
1050         current_ctx = blk_mq_get_ctx(q);
1051 
1052         if (!cpu_online(ctx->cpu))
1053                 ctx = current_ctx;
1054         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1055 
1056         /*
1057          * Preemption doesn't flush the plug list, so it's possible that
1058          * ctx->cpu is offline now.
1059          */
1060         spin_lock(&ctx->lock);
1061         while (!list_empty(list)) {
1062                 struct request *rq;
1063 
1064                 rq = list_first_entry(list, struct request, queuelist);
1065                 list_del_init(&rq->queuelist);
1066                 rq->mq_ctx = ctx;
1067                 __blk_mq_insert_request(hctx, rq, false);
1068         }
1069         spin_unlock(&ctx->lock);
1070 
1071         blk_mq_run_hw_queue(hctx, from_schedule);
1072         blk_mq_put_ctx(current_ctx);
1073 }
1074 
1075 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1076 {
1077         struct request *rqa = container_of(a, struct request, queuelist);
1078         struct request *rqb = container_of(b, struct request, queuelist);
1079 
1080         return !(rqa->mq_ctx < rqb->mq_ctx ||
1081                  (rqa->mq_ctx == rqb->mq_ctx &&
1082                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1083 }
1084 
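     /*
      * Flush a task's plugged mq requests: sort the list so requests that
      * share a software queue are adjacent, then hand each per-ctx batch
      * to blk_mq_insert_requests() in one go.
      */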
1085 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1086 {
1087         struct blk_mq_ctx *this_ctx;
1088         struct request_queue *this_q;
1089         struct request *rq;
1090         LIST_HEAD(list);
1091         LIST_HEAD(ctx_list);
1092         unsigned int depth;
1093 
1094         list_splice_init(&plug->mq_list, &list);
1095 
1096         list_sort(NULL, &list, plug_ctx_cmp);
1097 
1098         this_q = NULL;
1099         this_ctx = NULL;
1100         depth = 0;
1101 
1102         while (!list_empty(&list)) {
1103                 rq = list_entry_rq(list.next);
1104                 list_del_init(&rq->queuelist);
1105                 BUG_ON(!rq->q);
1106                 if (rq->mq_ctx != this_ctx) {
1107                         if (this_ctx) {
1108                                 blk_mq_insert_requests(this_q, this_ctx,
1109                                                         &ctx_list, depth,
1110                                                         from_schedule);
1111                         }
1112 
1113                         this_ctx = rq->mq_ctx;
1114                         this_q = rq->q;
1115                         depth = 0;
1116                 }
1117 
1118                 depth++;
1119                 list_add_tail(&rq->queuelist, &ctx_list);
1120         }
1121 
1122         /*
1123          * If 'this_ctx' is set, we know we have entries to complete
1124          * on 'ctx_list'. Do those.
1125          */
1126         if (this_ctx) {
1127                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1128                                        from_schedule);
1129         }
1130 }
1131 
1132 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1133 {
1134         init_request_from_bio(rq, bio);
1135 
1136         if (blk_do_io_stat(rq))
1137                 blk_account_io_start(rq, 1);
1138 }
1139 
1140 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1141 {
1142         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1143                 !blk_queue_nomerges(hctx->queue);
1144 }
1145 
1146 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1147                                          struct blk_mq_ctx *ctx,
1148                                          struct request *rq, struct bio *bio)
1149 {
1150         if (!hctx_allow_merges(hctx)) {
1151                 blk_mq_bio_to_request(rq, bio);
1152                 spin_lock(&ctx->lock);
1153 insert_rq:
1154                 __blk_mq_insert_request(hctx, rq, false);
1155                 spin_unlock(&ctx->lock);
1156                 return false;
1157         } else {
1158                 struct request_queue *q = hctx->queue;
1159 
1160                 spin_lock(&ctx->lock);
1161                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1162                         blk_mq_bio_to_request(rq, bio);
1163                         goto insert_rq;
1164                 }
1165 
1166                 spin_unlock(&ctx->lock);
1167                 __blk_mq_free_request(hctx, ctx, rq);
1168                 return true;
1169         }
1170 }
1171 
1172 struct blk_map_ctx {
1173         struct blk_mq_hw_ctx *hctx;
1174         struct blk_mq_ctx *ctx;
1175 };
1176 
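     /*
      * Allocate a request for this bio. First try a non-blocking
      * (GFP_ATOMIC) tag allocation; if that fails, run the hardware queue
      * to flush out pending work and retry with __GFP_WAIT so we block
      * until a tag frees up.
      */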
1177 static struct request *blk_mq_map_request(struct request_queue *q,
1178                                           struct bio *bio,
1179                                           struct blk_map_ctx *data)
1180 {
1181         struct blk_mq_hw_ctx *hctx;
1182         struct blk_mq_ctx *ctx;
1183         struct request *rq;
1184         int rw = bio_data_dir(bio);
1185         struct blk_mq_alloc_data alloc_data;
1186 
1187         if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
1188                 bio_endio(bio, -EIO);
1189                 return NULL;
1190         }
1191 
1192         ctx = blk_mq_get_ctx(q);
1193         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1194 
1195         if (rw_is_sync(bio->bi_rw))
1196                 rw |= REQ_SYNC;
1197 
1198         trace_block_getrq(q, bio, rw);
1199         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1200                         hctx);
1201         rq = __blk_mq_alloc_request(&alloc_data, rw);
1202         if (unlikely(!rq)) {
1203                 __blk_mq_run_hw_queue(hctx);
1204                 blk_mq_put_ctx(ctx);
1205                 trace_block_sleeprq(q, bio, rw);
1206 
1207                 ctx = blk_mq_get_ctx(q);
1208                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1209                 blk_mq_set_alloc_data(&alloc_data, q,
1210                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1211                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1212                 ctx = alloc_data.ctx;
1213                 hctx = alloc_data.hctx;
1214         }
1215 
1216         hctx->queued++;
1217         data->hctx = hctx;
1218         data->ctx = ctx;
1219         return rq;
1220 }
1221 
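     /*
      * Bypass the software queues and hand the request straight to the
      * driver. Returns 0 if the driver accepted it or ended it with an
      * error, -1 if the driver was busy and the caller should fall back
      * to the normal insert path.
      */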
1222 static int blk_mq_direct_issue_request(struct request *rq)
1223 {
1224         int ret;
1225         struct request_queue *q = rq->q;
1226         struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
1227                         rq->mq_ctx->cpu);
1228         struct blk_mq_queue_data bd = {
1229                 .rq = rq,
1230                 .list = NULL,
1231                 .last = 1
1232         };
1233 
1234         /*
1235          * If the driver accepts the request (OK), we are done. On error,
1236          * kill it. Any other return (busy) means the caller should add it
1237          * to its list as we previously would have done.
1238          */
1239         ret = q->mq_ops->queue_rq(hctx, &bd);
1240         if (ret == BLK_MQ_RQ_QUEUE_OK)
1241                 return 0;
1242         else {
1243                 __blk_mq_requeue_request(rq);
1244 
1245                 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1246                         rq->errors = -EIO;
1247                         blk_mq_end_request(rq, rq->errors);
1248                         return 0;
1249                 }
1250                 return -1;
1251         }
1252 }
1253 
1254 /*
1255  * Multiple hardware queue variant. This will not use per-process plugs,
1256  * but will attempt to bypass the hctx queueing if we can go straight to
1257  * hardware for SYNC IO.
1258  */
1259 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1260 {
1261         const int is_sync = rw_is_sync(bio->bi_rw);
1262         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1263         struct blk_map_ctx data;
1264         struct request *rq;
1265         unsigned int request_count = 0;
1266         struct blk_plug *plug;
1267         struct request *same_queue_rq = NULL;
1268 
1269         blk_queue_bounce(q, &bio);
1270 
1271         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1272                 bio_endio(bio, -EIO);
1273                 return;
1274         }
1275 
1276         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1277             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1278                 return;
1279 
1280         rq = blk_mq_map_request(q, bio, &data);
1281         if (unlikely(!rq))
1282                 return;
1283 
1284         if (unlikely(is_flush_fua)) {
1285                 blk_mq_bio_to_request(rq, bio);
1286                 blk_insert_flush(rq);
1287                 goto run_queue;
1288         }
1289 
1290         plug = current->plug;
1291         /*
1292          * If the driver supports deferred issue based on 'last', then
1293          * queue it up like normal since we can potentially save some
1294          * CPU this way.
1295          */
1296         if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1297             !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1298                 struct request *old_rq = NULL;
1299 
1300                 blk_mq_bio_to_request(rq, bio);
1301 
1302                 /*
1303                  * We do limited plugging. If the bio can be merged, do the
1304                  * merge. Otherwise the existing request in the plug list will
1305                  * be issued. So the plug list will have one request at most.
1306                  */
1307                 if (plug) {
1308                         /*
1309                          * The plug list might get flushed before this. If that
1310                          * happens, same_queue_rq is invalid and the plug list is empty.
1311                          **/
1312                         if (same_queue_rq && !list_empty(&plug->mq_list)) {
1313                                 old_rq = same_queue_rq;
1314                                 list_del_init(&old_rq->queuelist);
1315                         }
1316                         list_add_tail(&rq->queuelist, &plug->mq_list);
1317                 } else /* is_sync */
1318                         old_rq = rq;
1319                 blk_mq_put_ctx(data.ctx);
1320                 if (!old_rq)
1321                         return;
1322                 if (!blk_mq_direct_issue_request(old_rq))
1323                         return;
1324                 blk_mq_insert_request(old_rq, false, true, true);
1325                 return;
1326         }
1327 
1328         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1329                 /*
1330                  * For a SYNC request, send it to the hardware immediately. For
1331                  * an ASYNC request, just ensure that we run it later on. The
1332                  * latter allows for merging opportunities and more efficient
1333                  * dispatching.
1334                  */
1335 run_queue:
1336                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1337         }
1338         blk_mq_put_ctx(data.ctx);
1339 }
1340 
1341 /*
1342  * Single hardware queue variant. This will attempt to use any per-process
1343  * plug for merging and IO deferral.
1344  */
1345 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1346 {
1347         const int is_sync = rw_is_sync(bio->bi_rw);
1348         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1349         struct blk_plug *plug;
1350         unsigned int request_count = 0;
1351         struct blk_map_ctx data;
1352         struct request *rq;
1353 
1354         blk_queue_bounce(q, &bio);
1355 
1356         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1357                 bio_endio(bio, -EIO);
1358                 return;
1359         }
1360 
1361         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1362             blk_attempt_plug_merge(q, bio, &request_count, NULL))
1363                 return;
1364 
1365         rq = blk_mq_map_request(q, bio, &data);
1366         if (unlikely(!rq))
1367                 return;
1368 
1369         if (unlikely(is_flush_fua)) {
1370                 blk_mq_bio_to_request(rq, bio);
1371                 blk_insert_flush(rq);
1372                 goto run_queue;
1373         }
1374 
1375         /*
1376          * If a task plug exists, use it. Since this is completely lockless,
1377          * we can utilize it to temporarily store requests until the task is
1378          * either done or scheduled away.
1379          */
1380         plug = current->plug;
1381         if (plug) {
1382                 blk_mq_bio_to_request(rq, bio);
1383                 if (list_empty(&plug->mq_list))
1384                         trace_block_plug(q);
1385                 else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1386                         blk_flush_plug_list(plug, false);
1387                         trace_block_plug(q);
1388                 }
1389                 list_add_tail(&rq->queuelist, &plug->mq_list);
1390                 blk_mq_put_ctx(data.ctx);
1391                 return;
1392         }
1393 
1394         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1395                 /*
1396                  * For a SYNC request, send it to the hardware immediately. For
1397                  * an ASYNC request, just ensure that we run it later on. The
1398                  * latter allows for merging opportunities and more efficient
1399                  * dispatching.
1400                  */
1401 run_queue:
1402                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1403         }
1404 
1405         blk_mq_put_ctx(data.ctx);
1406 }
1407 
1408 /*
1409  * Default mapping to a software queue, since we use one per CPU.
1410  */
1411 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1412 {
1413         return q->queue_hw_ctx[q->mq_map[cpu]];
1414 }
1415 EXPORT_SYMBOL(blk_mq_map_queue);
1416 
1417 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1418                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1419 {
1420         struct page *page;
1421 
1422         if (tags->rqs && set->ops->exit_request) {
1423                 int i;
1424 
1425                 for (i = 0; i < tags->nr_tags; i++) {
1426                         if (!tags->rqs[i])
1427                                 continue;
1428                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1429                                                 hctx_idx, i);
1430                         tags->rqs[i] = NULL;
1431                 }
1432         }
1433 
1434         while (!list_empty(&tags->page_list)) {
1435                 page = list_first_entry(&tags->page_list, struct page, lru);
1436                 list_del_init(&page->lru);
1437                 __free_pages(page, page->private);
1438         }
1439 
1440         kfree(tags->rqs);
1441 
1442         blk_mq_free_tags(tags);
1443 }
1444 
1445 static size_t order_to_size(unsigned int order)
1446 {
1447         return (size_t)PAGE_SIZE << order;
1448 }
1449 
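     /*
      * Allocate the tag map and the per-tag request structures for one
      * hardware queue. Requests (plus driver payload) are packed into
      * pages, starting with order-4 allocations and falling back to
      * smaller orders, and each request is passed to the driver's
      * init_request callback if one is set.
      */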
1450 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1451                 unsigned int hctx_idx)
1452 {
1453         struct blk_mq_tags *tags;
1454         unsigned int i, j, entries_per_page, max_order = 4;
1455         size_t rq_size, left;
1456 
1457         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1458                                 set->numa_node,
1459                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1460         if (!tags)
1461                 return NULL;
1462 
1463         INIT_LIST_HEAD(&tags->page_list);
1464 
1465         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1466                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1467                                  set->numa_node);
1468         if (!tags->rqs) {
1469                 blk_mq_free_tags(tags);
1470                 return NULL;
1471         }
1472 
1473         /*
1474          * rq_size is the size of the request plus driver payload, rounded
1475          * to the cacheline size
1476          */
1477         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1478                                 cache_line_size());
1479         left = rq_size * set->queue_depth;
1480 
1481         for (i = 0; i < set->queue_depth; ) {
1482                 int this_order = max_order;
1483                 struct page *page;
1484                 int to_do;
1485                 void *p;
1486 
1487                 while (left < order_to_size(this_order - 1) && this_order)
1488                         this_order--;
1489 
1490                 do {
1491                         page = alloc_pages_node(set->numa_node,
1492                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1493                                 this_order);
1494                         if (page)
1495                                 break;
1496                         if (!this_order--)
1497                                 break;
1498                         if (order_to_size(this_order) < rq_size)
1499                                 break;
1500                 } while (1);
1501 
1502                 if (!page)
1503                         goto fail;
1504 
1505                 page->private = this_order;
1506                 list_add_tail(&page->lru, &tags->page_list);
1507 
1508                 p = page_address(page);
1509                 entries_per_page = order_to_size(this_order) / rq_size;
1510                 to_do = min(entries_per_page, set->queue_depth - i);
1511                 left -= to_do * rq_size;
1512                 for (j = 0; j < to_do; j++) {
1513                         tags->rqs[i] = p;
1514                         if (set->ops->init_request) {
1515                                 if (set->ops->init_request(set->driver_data,
1516                                                 tags->rqs[i], hctx_idx, i,
1517                                                 set->numa_node)) {
1518                                         tags->rqs[i] = NULL;
1519                                         goto fail;
1520                                 }
1521                         }
1522 
1523                         p += rq_size;
1524                         i++;
1525                 }
1526         }
1527         return tags;
1528 
1529 fail:
1530         blk_mq_free_rq_map(set, tags, hctx_idx);
1531         return NULL;
1532 }
1533 
1534 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1535 {
1536         kfree(bitmap->map);
1537 }
1538 
1539 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1540 {
1541         unsigned int bpw = 8, total, num_maps, i;
1542 
1543         bitmap->bits_per_word = bpw;
1544 
1545         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1546         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1547                                         GFP_KERNEL, node);
1548         if (!bitmap->map)
1549                 return -ENOMEM;
1550 
1551         total = nr_cpu_ids;
1552         for (i = 0; i < num_maps; i++) {
1553                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1554                 total -= bitmap->map[i].depth;
1555         }
1556 
1557         return 0;
1558 }
1559 
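/*
 * CPU hot-unplug handling: when a CPU goes away, any requests still sitting
 * on its software queue are spliced onto the software queue of the CPU that
 * runs the notifier, the pending bit is cleared on the old context and set
 * on the new one, and the owning hardware queue is kicked so the moved
 * requests are dispatched.
 */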
1560 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1561 {
1562         struct request_queue *q = hctx->queue;
1563         struct blk_mq_ctx *ctx;
1564         LIST_HEAD(tmp);
1565 
1566         /*
1567          * Move ctx entries to a new CPU, if this one is going away.
1568          */
1569         ctx = __blk_mq_get_ctx(q, cpu);
1570 
1571         spin_lock(&ctx->lock);
1572         if (!list_empty(&ctx->rq_list)) {
1573                 list_splice_init(&ctx->rq_list, &tmp);
1574                 blk_mq_hctx_clear_pending(hctx, ctx);
1575         }
1576         spin_unlock(&ctx->lock);
1577 
1578         if (list_empty(&tmp))
1579                 return NOTIFY_OK;
1580 
1581         ctx = blk_mq_get_ctx(q);
1582         spin_lock(&ctx->lock);
1583 
1584         while (!list_empty(&tmp)) {
1585                 struct request *rq;
1586 
1587                 rq = list_first_entry(&tmp, struct request, queuelist);
1588                 rq->mq_ctx = ctx;
1589                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1590         }
1591 
1592         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1593         blk_mq_hctx_mark_pending(hctx, ctx);
1594 
1595         spin_unlock(&ctx->lock);
1596 
1597         blk_mq_run_hw_queue(hctx, true);
1598         blk_mq_put_ctx(ctx);
1599         return NOTIFY_OK;
1600 }
1601 
1602 static int blk_mq_hctx_notify(void *data, unsigned long action,
1603                               unsigned int cpu)
1604 {
1605         struct blk_mq_hw_ctx *hctx = data;
1606 
1607         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1608                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1609 
1610         /*
1611          * In the case of a CPU coming online, tags may be reallocated
1612          * in blk_mq_map_swqueue() after the mapping is updated.
1613          */
1614 
1615         return NOTIFY_OK;
1616 }
1617 
1618 /* hctx->ctxs will be freed in queue's release handler */
1619 static void blk_mq_exit_hctx(struct request_queue *q,
1620                 struct blk_mq_tag_set *set,
1621                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1622 {
1623         unsigned flush_start_tag = set->queue_depth;
1624 
1625         blk_mq_tag_idle(hctx);
1626 
1627         if (set->ops->exit_request)
1628                 set->ops->exit_request(set->driver_data,
1629                                        hctx->fq->flush_rq, hctx_idx,
1630                                        flush_start_tag + hctx_idx);
1631 
1632         if (set->ops->exit_hctx)
1633                 set->ops->exit_hctx(hctx, hctx_idx);
1634 
1635         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1636         blk_free_flush_queue(hctx->fq);
1637         blk_mq_free_bitmap(&hctx->ctx_map);
1638 }
1639 
1640 static void blk_mq_exit_hw_queues(struct request_queue *q,
1641                 struct blk_mq_tag_set *set, int nr_queue)
1642 {
1643         struct blk_mq_hw_ctx *hctx;
1644         unsigned int i;
1645 
1646         queue_for_each_hw_ctx(q, hctx, i) {
1647                 if (i == nr_queue)
1648                         break;
1649                 blk_mq_exit_hctx(q, set, hctx, i);
1650         }
1651 }
1652 
1653 static void blk_mq_free_hw_queues(struct request_queue *q,
1654                 struct blk_mq_tag_set *set)
1655 {
1656         struct blk_mq_hw_ctx *hctx;
1657         unsigned int i;
1658 
1659         queue_for_each_hw_ctx(q, hctx, i)
1660                 free_cpumask_var(hctx->cpumask);
1661 }
1662 
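/*
 * Set up a single hardware context. The error labels below unwind in the
 * exact reverse order of the setup steps. Note that the per-hctx flush
 * request is passed to ->init_request() with index (set->queue_depth +
 * hctx_idx), i.e. an index outside the range used for normal requests, so
 * per-request driver state for flushes never collides with a regular tag.
 */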
1663 static int blk_mq_init_hctx(struct request_queue *q,
1664                 struct blk_mq_tag_set *set,
1665                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1666 {
1667         int node;
1668         unsigned flush_start_tag = set->queue_depth;
1669 
1670         node = hctx->numa_node;
1671         if (node == NUMA_NO_NODE)
1672                 node = hctx->numa_node = set->numa_node;
1673 
1674         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1675         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1676         spin_lock_init(&hctx->lock);
1677         INIT_LIST_HEAD(&hctx->dispatch);
1678         hctx->queue = q;
1679         hctx->queue_num = hctx_idx;
1680         hctx->flags = set->flags;
1681 
1682         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1683                                         blk_mq_hctx_notify, hctx);
1684         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1685 
1686         hctx->tags = set->tags[hctx_idx];
1687 
1688         /*
1689          * Allocate space for all possible cpus to avoid allocation at
1690          * runtime
1691          */
1692         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1693                                         GFP_KERNEL, node);
1694         if (!hctx->ctxs)
1695                 goto unregister_cpu_notifier;
1696 
1697         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1698                 goto free_ctxs;
1699 
1700         hctx->nr_ctx = 0;
1701 
1702         if (set->ops->init_hctx &&
1703             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1704                 goto free_bitmap;
1705 
1706         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1707         if (!hctx->fq)
1708                 goto exit_hctx;
1709 
1710         if (set->ops->init_request &&
1711             set->ops->init_request(set->driver_data,
1712                                    hctx->fq->flush_rq, hctx_idx,
1713                                    flush_start_tag + hctx_idx, node))
1714                 goto free_fq;
1715 
1716         return 0;
1717 
1718  free_fq:
1719         blk_free_flush_queue(hctx->fq);
1720  exit_hctx:
1721         if (set->ops->exit_hctx)
1722                 set->ops->exit_hctx(hctx, hctx_idx);
1723  free_bitmap:
1724         blk_mq_free_bitmap(&hctx->ctx_map);
1725  free_ctxs:
1726         kfree(hctx->ctxs);
1727  unregister_cpu_notifier:
1728         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1729 
1730         return -1;
1731 }
1732 
1733 static int blk_mq_init_hw_queues(struct request_queue *q,
1734                 struct blk_mq_tag_set *set)
1735 {
1736         struct blk_mq_hw_ctx *hctx;
1737         unsigned int i;
1738 
1739         /*
1740          * Initialize hardware queues
1741          */
1742         queue_for_each_hw_ctx(q, hctx, i) {
1743                 if (blk_mq_init_hctx(q, set, hctx, i))
1744                         break;
1745         }
1746 
1747         if (i == q->nr_hw_queues)
1748                 return 0;
1749 
1750         /*
1751          * Init failed
1752          */
1753         blk_mq_exit_hw_queues(q, set, i);
1754 
1755         return 1;
1756 }
1757 
1758 static void blk_mq_init_cpu_queues(struct request_queue *q,
1759                                    unsigned int nr_hw_queues)
1760 {
1761         unsigned int i;
1762 
1763         for_each_possible_cpu(i) {
1764                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1765                 struct blk_mq_hw_ctx *hctx;
1766 
1767                 memset(__ctx, 0, sizeof(*__ctx));
1768                 __ctx->cpu = i;
1769                 spin_lock_init(&__ctx->lock);
1770                 INIT_LIST_HEAD(&__ctx->rq_list);
1771                 __ctx->queue = q;
1772 
1773                 /* If the cpu isn't online, it is mapped to the first hctx */
1774                 if (!cpu_online(i))
1775                         continue;
1776 
1777                 hctx = q->mq_ops->map_queue(q, i);
1778 
1779                 /*
1780                  * Set local node, IFF we have more than one hw queue. If
1781                  * not, we remain on the home node of the device
1782                  */
1783                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1784                         hctx->numa_node = cpu_to_node(i);
1785         }
1786 }
1787 
1788 static void blk_mq_map_swqueue(struct request_queue *q)
1789 {
1790         unsigned int i;
1791         struct blk_mq_hw_ctx *hctx;
1792         struct blk_mq_ctx *ctx;
1793         struct blk_mq_tag_set *set = q->tag_set;
1794 
1795         queue_for_each_hw_ctx(q, hctx, i) {
1796                 cpumask_clear(hctx->cpumask);
1797                 hctx->nr_ctx = 0;
1798         }
1799 
1800         /*
1801          * Map software to hardware queues
1802          */
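        /*
         * Illustrative example (hypothetical topology): with 8 online CPUs
         * and 2 hardware queues, map_queue() might spread CPUs 0-3 onto
         * hctx 0 and CPUs 4-7 onto hctx 1; each ctx then records its slot
         * within its hardware queue in ctx->index_hw (0..3 here), and
         * hctx->nr_ctx ends up as 4 for both hardware queues.
         */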
1803         queue_for_each_ctx(q, ctx, i) {
1804                 /* If the cpu isn't online, it is mapped to the first hctx */
1805                 if (!cpu_online(i))
1806                         continue;
1807 
1808                 hctx = q->mq_ops->map_queue(q, i);
1809                 cpumask_set_cpu(i, hctx->cpumask);
1810                 ctx->index_hw = hctx->nr_ctx;
1811                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1812         }
1813 
1814         queue_for_each_hw_ctx(q, hctx, i) {
1815                 struct blk_mq_ctxmap *map = &hctx->ctx_map;
1816 
1817                 /*
1818                  * If no software queues are mapped to this hardware queue,
1819                  * disable it and free the request entries.
1820                  */
1821                 if (!hctx->nr_ctx) {
1822                         if (set->tags[i]) {
1823                                 blk_mq_free_rq_map(set, set->tags[i], i);
1824                                 set->tags[i] = NULL;
1825                         }
1826                         hctx->tags = NULL;
1827                         continue;
1828                 }
1829 
1830                 /* an unmapped hw queue can be remapped after the CPU topology changes */
1831                 if (!set->tags[i])
1832                         set->tags[i] = blk_mq_init_rq_map(set, i);
1833                 hctx->tags = set->tags[i];
1834                 WARN_ON(!hctx->tags);
1835 
1836                 /*
1837                  * Set the map size to the number of mapped software queues.
1838                  * This is more accurate and more efficient than looping
1839                  * over all possibly mapped software queues.
1840                  */
1841                 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
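                /*
                 * Example: 4 mapped software queues with bits_per_word = 8
                 * gives map->size = 1, so only one bitmap word has to be
                 * scanned when this hctx is checked for pending work.
                 */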
1842 
1843                 /*
1844                  * Initialize batch roundrobin counts
1845                  */
1846                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1847                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1848         }
1849 
1850         queue_for_each_ctx(q, ctx, i) {
1851                 if (!cpu_online(i))
1852                         continue;
1853 
1854                 hctx = q->mq_ops->map_queue(q, i);
1855                 cpumask_set_cpu(i, hctx->tags->cpumask);
1856         }
1857 }
1858 
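/*
 * Update BLK_MQ_F_TAG_SHARED on every hardware context of every queue using
 * this tag set: once two or more queues share the set, tag allocation has to
 * be accounted per active queue so one queue cannot starve the others. Each
 * queue is frozen while its flags are flipped so no requests are in flight
 * during the switch.
 */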
1859 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1860 {
1861         struct blk_mq_hw_ctx *hctx;
1862         struct request_queue *q;
1863         bool shared;
1864         int i;
1865 
1866         if (set->tag_list.next == set->tag_list.prev)
1867                 shared = false;
1868         else
1869                 shared = true;
1870 
1871         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1872                 blk_mq_freeze_queue(q);
1873 
1874                 queue_for_each_hw_ctx(q, hctx, i) {
1875                         if (shared)
1876                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1877                         else
1878                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1879                 }
1880                 blk_mq_unfreeze_queue(q);
1881         }
1882 }
1883 
1884 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1885 {
1886         struct blk_mq_tag_set *set = q->tag_set;
1887 
1888         mutex_lock(&set->tag_list_lock);
1889         list_del_init(&q->tag_set_list);
1890         blk_mq_update_tag_set_depth(set);
1891         mutex_unlock(&set->tag_list_lock);
1892 }
1893 
1894 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1895                                      struct request_queue *q)
1896 {
1897         q->tag_set = set;
1898 
1899         mutex_lock(&set->tag_list_lock);
1900         list_add_tail(&q->tag_set_list, &set->tag_list);
1901         blk_mq_update_tag_set_depth(set);
1902         mutex_unlock(&set->tag_list_lock);
1903 }
1904 
1905 /*
1906  * This is the actual release handler for mq, but we call it from the
1907  * request queue's release handler to avoid use-after-free headaches:
1908  * q->mq_kobj shouldn't have been introduced, but we can't group the
1909  * ctx/kctx kobjs without it.
1910  */
1911 void blk_mq_release(struct request_queue *q)
1912 {
1913         struct blk_mq_hw_ctx *hctx;
1914         unsigned int i;
1915 
1916         /* hctx kobj stays in hctx */
1917         queue_for_each_hw_ctx(q, hctx, i) {
1918                 if (!hctx)
1919                         continue;
1920                 kfree(hctx->ctxs);
1921                 kfree(hctx);
1922         }
1923 
1924         kfree(q->queue_hw_ctx);
1925 
1926         /* ctx kobj stays in queue_ctx */
1927         free_percpu(q->queue_ctx);
1928 }
1929 
1930 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1931 {
1932         struct request_queue *uninit_q, *q;
1933 
1934         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1935         if (!uninit_q)
1936                 return ERR_PTR(-ENOMEM);
1937 
1938         q = blk_mq_init_allocated_queue(set, uninit_q);
1939         if (IS_ERR(q))
1940                 blk_cleanup_queue(uninit_q);
1941 
1942         return q;
1943 }
1944 EXPORT_SYMBOL(blk_mq_init_queue);
1945 
1946 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1947                                                   struct request_queue *q)
1948 {
1949         struct blk_mq_hw_ctx **hctxs;
1950         struct blk_mq_ctx __percpu *ctx;
1951         unsigned int *map;
1952         int i;
1953 
1954         ctx = alloc_percpu(struct blk_mq_ctx);
1955         if (!ctx)
1956                 return ERR_PTR(-ENOMEM);
1957 
1958         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1959                         set->numa_node);
1960 
1961         if (!hctxs)
1962                 goto err_percpu;
1963 
1964         map = blk_mq_make_queue_map(set);
1965         if (!map)
1966                 goto err_map;
1967 
1968         for (i = 0; i < set->nr_hw_queues; i++) {
1969                 int node = blk_mq_hw_queue_to_node(map, i);
1970 
1971                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1972                                         GFP_KERNEL, node);
1973                 if (!hctxs[i])
1974                         goto err_hctxs;
1975 
1976                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1977                                                 node))
1978                         goto err_hctxs;
1979 
1980                 atomic_set(&hctxs[i]->nr_active, 0);
1981                 hctxs[i]->numa_node = node;
1982                 hctxs[i]->queue_num = i;
1983         }
1984 
1985         /*
1986          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1987          * See blk_register_queue() for details.
1988          */
1989         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1990                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1991                 goto err_hctxs;
1992 
1993         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1994         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
1995 
1996         q->nr_queues = nr_cpu_ids;
1997         q->nr_hw_queues = set->nr_hw_queues;
1998         q->mq_map = map;
1999 
2000         q->queue_ctx = ctx;
2001         q->queue_hw_ctx = hctxs;
2002 
2003         q->mq_ops = set->ops;
2004         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2005 
2006         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2007                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2008 
2009         q->sg_reserved_size = INT_MAX;
2010 
2011         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
2012         INIT_LIST_HEAD(&q->requeue_list);
2013         spin_lock_init(&q->requeue_lock);
2014 
2015         if (q->nr_hw_queues > 1)
2016                 blk_queue_make_request(q, blk_mq_make_request);
2017         else
2018                 blk_queue_make_request(q, blk_sq_make_request);
2019 
2020         /*
2021          * Do this after blk_queue_make_request() overrides it...
2022          */
2023         q->nr_requests = set->queue_depth;
2024 
2025         if (set->ops->complete)
2026                 blk_queue_softirq_done(q, set->ops->complete);
2027 
2028         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2029 
2030         if (blk_mq_init_hw_queues(q, set))
2031                 goto err_hctxs;
2032 
2033         mutex_lock(&all_q_mutex);
2034         list_add_tail(&q->all_q_node, &all_q_list);
2035         mutex_unlock(&all_q_mutex);
2036 
2037         blk_mq_add_queue_tag_set(set, q);
2038 
2039         blk_mq_map_swqueue(q);
2040 
2041         return q;
2042 
2043 err_hctxs:
2044         kfree(map);
2045         for (i = 0; i < set->nr_hw_queues; i++) {
2046                 if (!hctxs[i])
2047                         break;
2048                 free_cpumask_var(hctxs[i]->cpumask);
2049                 kfree(hctxs[i]);
2050         }
2051 err_map:
2052         kfree(hctxs);
2053 err_percpu:
2054         free_percpu(ctx);
2055         return ERR_PTR(-ENOMEM);
2056 }
2057 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2058 
2059 void blk_mq_free_queue(struct request_queue *q)
2060 {
2061         struct blk_mq_tag_set   *set = q->tag_set;
2062 
2063         blk_mq_del_queue_tag_set(q);
2064 
2065         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2066         blk_mq_free_hw_queues(q, set);
2067 
2068         percpu_ref_exit(&q->mq_usage_counter);
2069 
2070         kfree(q->mq_map);
2071 
2072         q->mq_map = NULL;
2073 
2074         mutex_lock(&all_q_mutex);
2075         list_del_init(&q->all_q_node);
2076         mutex_unlock(&all_q_mutex);
2077 }
2078 
2079 /* Basically redo blk_mq_init_queue with queue frozen */
2080 static void blk_mq_queue_reinit(struct request_queue *q)
2081 {
2082         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2083 
2084         blk_mq_sysfs_unregister(q);
2085 
2086         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
2087 
2088         /*
2089          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2090          * we should change hctx numa_node according to the new topology (this
2091          * involves freeing and re-allocating memory; is it worth doing?)
2092          */
2093 
2094         blk_mq_map_swqueue(q);
2095 
2096         blk_mq_sysfs_register(q);
2097 }
2098 
2099 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2100                                       unsigned long action, void *hcpu)
2101 {
2102         struct request_queue *q;
2103 
2104         /*
2105          * Before new mappings are established, a hot-added CPU might already
2106          * have started handling requests. This doesn't break anything, as we
2107          * map offline CPUs to the first hardware queue. We will re-init the
2108          * queue below to get optimal settings.
2109          */
2110         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
2111             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
2112                 return NOTIFY_OK;
2113 
2114         mutex_lock(&all_q_mutex);
2115 
2116         /*
2117          * We need to freeze and reinit all existing queues.  Freezing
2118          * involves a synchronous wait for an RCU grace period, and doing it
2119          * one by one may take a long time.  Start freezing all queues in
2120          * one swoop and then wait for the completions so that freezing can
2121          * take place in parallel.
2122          */
2123         list_for_each_entry(q, &all_q_list, all_q_node)
2124                 blk_mq_freeze_queue_start(q);
2125         list_for_each_entry(q, &all_q_list, all_q_node) {
2126                 blk_mq_freeze_queue_wait(q);
2127 
2128                 /*
2129                  * timeout handler can't touch hw queue during the
2130                  * reinitialization
2131                  */
2132                 del_timer_sync(&q->timeout);
2133         }
2134 
2135         list_for_each_entry(q, &all_q_list, all_q_node)
2136                 blk_mq_queue_reinit(q);
2137 
2138         list_for_each_entry(q, &all_q_list, all_q_node)
2139                 blk_mq_unfreeze_queue(q);
2140 
2141         mutex_unlock(&all_q_mutex);
2142         return NOTIFY_OK;
2143 }
2144 
2145 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2146 {
2147         int i;
2148 
2149         for (i = 0; i < set->nr_hw_queues; i++) {
2150                 set->tags[i] = blk_mq_init_rq_map(set, i);
2151                 if (!set->tags[i])
2152                         goto out_unwind;
2153         }
2154 
2155         return 0;
2156 
2157 out_unwind:
2158         while (--i >= 0)
2159                 blk_mq_free_rq_map(set, set->tags[i], i);
2160 
2161         return -ENOMEM;
2162 }
2163 
2164 /*
2165  * Allocate the request maps associated with this tag_set. Note that this
2166  * may reduce the depth asked for, if memory is tight. set->queue_depth
2167  * will be updated to reflect the allocated depth.
2168  */
2169 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2170 {
2171         unsigned int depth;
2172         int err;
2173 
2174         depth = set->queue_depth;
2175         do {
2176                 err = __blk_mq_alloc_rq_maps(set);
2177                 if (!err)
2178                         break;
2179 
2180                 set->queue_depth >>= 1;
2181                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2182                         err = -ENOMEM;
2183                         break;
2184                 }
2185         } while (set->queue_depth);
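        /*
         * Example of the fallback above: a requested depth of 256 that fails
         * to allocate is retried at 128, then 64, and so on, until either an
         * allocation succeeds or the depth would fall below
         * reserved_tags + BLK_MQ_TAG_MIN.
         */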
2186 
2187         if (!set->queue_depth || err) {
2188                 pr_err("blk-mq: failed to allocate request map\n");
2189                 return -ENOMEM;
2190         }
2191 
2192         if (depth != set->queue_depth)
2193                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2194                                                 depth, set->queue_depth);
2195 
2196         return 0;
2197 }
2198 
2199 struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
2200 {
2201         return tags->cpumask;
2202 }
2203 EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2204 
2205 /*
2206  * Alloc a tag set to be associated with one or more request queues.
2207  * May fail with EINVAL for various error conditions. May adjust the
2208  * requested depth down, if it is too large. In that case, the adjusted
2209  * value will be stored in set->queue_depth.
2210  */
2211 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2212 {
2213         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2214 
2215         if (!set->nr_hw_queues)
2216                 return -EINVAL;
2217         if (!set->queue_depth)
2218                 return -EINVAL;
2219         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2220                 return -EINVAL;
2221 
2222         if (!set->ops->queue_rq || !set->ops->map_queue)
2223                 return -EINVAL;
2224 
2225         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2226                 pr_info("blk-mq: reduced tag depth to %u\n",
2227                         BLK_MQ_MAX_DEPTH);
2228                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2229         }
2230 
2231         /*
2232          * If a crashdump is active, then we are potentially in a very
2233          * memory-constrained environment. Limit us to one queue and
2234          * 64 tags to prevent using too much memory.
2235          */
2236         if (is_kdump_kernel()) {
2237                 set->nr_hw_queues = 1;
2238                 set->queue_depth = min(64U, set->queue_depth);
2239         }
2240 
2241         set->tags = kmalloc_node(set->nr_hw_queues *
2242                                  sizeof(struct blk_mq_tags *),
2243                                  GFP_KERNEL, set->numa_node);
2244         if (!set->tags)
2245                 return -ENOMEM;
2246 
2247         if (blk_mq_alloc_rq_maps(set))
2248                 goto enomem;
2249 
2250         mutex_init(&set->tag_list_lock);
2251         INIT_LIST_HEAD(&set->tag_list);
2252 
2253         return 0;
2254 enomem:
2255         kfree(set->tags);
2256         set->tags = NULL;
2257         return -ENOMEM;
2258 }
2259 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
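
/*
 * Typical driver-side use of the tag set API (illustrative sketch only;
 * "my_mq_ops", "my_cmd" and "my_dev" are hypothetical driver names). The
 * ops must provide at least .queue_rq and .map_queue, and cmd_size is the
 * per-request driver payload allocated alongside each struct request:
 *
 *        struct blk_mq_tag_set *set = &my_dev->tag_set;
 *        int ret;
 *
 *        memset(set, 0, sizeof(*set));
 *        set->ops = &my_mq_ops;
 *        set->nr_hw_queues = 1;
 *        set->queue_depth = 64;
 *        set->numa_node = NUMA_NO_NODE;
 *        set->cmd_size = sizeof(struct my_cmd);
 *        set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *        ret = blk_mq_alloc_tag_set(set);
 *        if (ret)
 *                return ret;
 *
 *        my_dev->q = blk_mq_init_queue(set);
 *        if (IS_ERR(my_dev->q)) {
 *                blk_mq_free_tag_set(set);
 *                return PTR_ERR(my_dev->q);
 *        }
 */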
2260 
2261 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2262 {
2263         int i;
2264 
2265         for (i = 0; i < set->nr_hw_queues; i++) {
2266                 if (set->tags[i])
2267                         blk_mq_free_rq_map(set, set->tags[i], i);
2268         }
2269 
2270         kfree(set->tags);
2271         set->tags = NULL;
2272 }
2273 EXPORT_SYMBOL(blk_mq_free_tag_set);
2274 
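/*
 * Adjust the effective depth of an existing queue, e.g. in response to a
 * write to the queue's sysfs "nr_requests" attribute. The new value can
 * shrink (or restore) the depth but can never exceed the tag set's original
 * queue_depth.
 */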
2275 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2276 {
2277         struct blk_mq_tag_set *set = q->tag_set;
2278         struct blk_mq_hw_ctx *hctx;
2279         int i, ret;
2280 
2281         if (!set || nr > set->queue_depth)
2282                 return -EINVAL;
2283 
2284         ret = 0;
2285         queue_for_each_hw_ctx(q, hctx, i) {
2286                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2287                 if (ret)
2288                         break;
2289         }
2290 
2291         if (!ret)
2292                 q->nr_requests = nr;
2293 
2294         return ret;
2295 }
2296 
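/*
 * blk_mq_disable_hotplug()/blk_mq_enable_hotplug() just take and release
 * all_q_mutex, the same mutex the CPU hotplug notifier above holds while
 * re-initializing queues, so callers can serialize against a hotplug-driven
 * remap.
 */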
2297 void blk_mq_disable_hotplug(void)
2298 {
2299         mutex_lock(&all_q_mutex);
2300 }
2301 
2302 void blk_mq_enable_hotplug(void)
2303 {
2304         mutex_unlock(&all_q_mutex);
2305 }
2306 
2307 static int __init blk_mq_init(void)
2308 {
2309         blk_mq_cpu_init();
2310 
2311         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2312 
2313         return 0;
2314 }
2315 subsys_initcall(blk_mq_init);
2316 
