TOMOYO Linux Cross Reference
Linux/block/blk-mq.c


  1 /*
  2  * Block multiqueue core code
  3  *
  4  * Copyright (C) 2013-2014 Jens Axboe
  5  * Copyright (C) 2013-2014 Christoph Hellwig
  6  */
  7 #include <linux/kernel.h>
  8 #include <linux/module.h>
  9 #include <linux/backing-dev.h>
 10 #include <linux/bio.h>
 11 #include <linux/blkdev.h>
 12 #include <linux/mm.h>
 13 #include <linux/init.h>
 14 #include <linux/slab.h>
 15 #include <linux/workqueue.h>
 16 #include <linux/smp.h>
 17 #include <linux/llist.h>
 18 #include <linux/list_sort.h>
 19 #include <linux/cpu.h>
 20 #include <linux/cache.h>
 21 #include <linux/sched/sysctl.h>
 22 #include <linux/delay.h>
 23 #include <linux/crash_dump.h>
 24 
 25 #include <trace/events/block.h>
 26 
 27 #include <linux/blk-mq.h>
 28 #include "blk.h"
 29 #include "blk-mq.h"
 30 #include "blk-mq-tag.h"
 31 
 32 static DEFINE_MUTEX(all_q_mutex);
 33 static LIST_HEAD(all_q_list);
 34 
 35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 36 static void blk_mq_run_queues(struct request_queue *q);
 37 
 38 /*
 39  * Check if any of the ctx's have pending work in this hardware queue
 40  */
 41 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 42 {
 43         unsigned int i;
 44 
 45         for (i = 0; i < hctx->ctx_map.map_size; i++)
 46                 if (hctx->ctx_map.map[i].word)
 47                         return true;
 48 
 49         return false;
 50 }
 51 
 52 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
 53                                               struct blk_mq_ctx *ctx)
 54 {
 55         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
 56 }
 57 
 58 #define CTX_TO_BIT(hctx, ctx)   \
 59         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
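/*
 * A worked example of the ctx_map addressing used by get_bm() and
 * CTX_TO_BIT() above, assuming bits_per_word == 8 (the value
 * blk_mq_alloc_bitmap() picks further down): a ctx with index_hw == 70
 * lands in word 70 / 8 == 8 of hctx->ctx_map.map[], at bit 70 & 7 == 6
 * of that word's ->word field.
 */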
 60 
 61 /*
 62  * Mark this ctx as having pending work in this hardware queue
 63  */
 64 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 65                                      struct blk_mq_ctx *ctx)
 66 {
 67         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
 68 
 69         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
 70                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 71 }
 72 
 73 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 74                                       struct blk_mq_ctx *ctx)
 75 {
 76         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
 77 
 78         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 79 }
 80 
 81 static int blk_mq_queue_enter(struct request_queue *q)
 82 {
 83         while (true) {
 84                 int ret;
 85 
 86                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
 87                         return 0;
 88 
 89                 ret = wait_event_interruptible(q->mq_freeze_wq,
 90                                 !q->mq_freeze_depth || blk_queue_dying(q));
 91                 if (blk_queue_dying(q))
 92                         return -ENODEV;
 93                 if (ret)
 94                         return ret;
 95         }
 96 }
 97 
 98 static void blk_mq_queue_exit(struct request_queue *q)
 99 {
100         percpu_ref_put(&q->mq_usage_counter);
101 }
102 
103 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
104 {
105         struct request_queue *q =
106                 container_of(ref, struct request_queue, mq_usage_counter);
107 
108         wake_up_all(&q->mq_freeze_wq);
109 }
110 
111 void blk_mq_freeze_queue_start(struct request_queue *q)
112 {
113         bool freeze;
114 
115         spin_lock_irq(q->queue_lock);
116         freeze = !q->mq_freeze_depth++;
117         spin_unlock_irq(q->queue_lock);
118 
119         if (freeze) {
120                 percpu_ref_kill(&q->mq_usage_counter);
121                 blk_mq_run_queues(q);
122         }
123 }
124 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
125 
126 static void blk_mq_freeze_queue_wait(struct request_queue *q)
127 {
128         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
129 }
130 
131 /*
132  * Guarantee no request is in use, so we can change any data structure of
133  * the queue afterward.
134  */
135 void blk_mq_freeze_queue(struct request_queue *q)
136 {
137         blk_mq_freeze_queue_start(q);
138         blk_mq_freeze_queue_wait(q);
139 }
140 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
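/*
 * A minimal usage sketch of the freeze/unfreeze pair (hypothetical
 * caller, shown only for illustration): code that needs to change queue
 * or tag data while no requests are in flight would typically do
 *
 *	blk_mq_freeze_queue(q);
 *	... update queue/hctx/tag data ...
 *	blk_mq_unfreeze_queue(q);
 *
 * blk_mq_freeze_queue() kills q->mq_usage_counter and waits for it to
 * drop to zero, so blk_mq_queue_enter() blocks new requests until
 * blk_mq_unfreeze_queue() reinitializes the ref and wakes the waiters.
 */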
141 
142 void blk_mq_unfreeze_queue(struct request_queue *q)
143 {
144         bool wake;
145 
146         spin_lock_irq(q->queue_lock);
147         wake = !--q->mq_freeze_depth;
148         WARN_ON_ONCE(q->mq_freeze_depth < 0);
149         spin_unlock_irq(q->queue_lock);
150         if (wake) {
151                 percpu_ref_reinit(&q->mq_usage_counter);
152                 wake_up_all(&q->mq_freeze_wq);
153         }
154 }
155 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
156 
157 void blk_mq_wake_waiters(struct request_queue *q)
158 {
159         struct blk_mq_hw_ctx *hctx;
160         unsigned int i;
161 
162         queue_for_each_hw_ctx(q, hctx, i)
163                 if (blk_mq_hw_queue_mapped(hctx))
164                         blk_mq_tag_wakeup_all(hctx->tags, true);
165 
166         /*
167          * If we are called because the queue has now been marked as
168          * dying, we need to ensure that processes currently waiting on
169          * the queue are notified as well.
170          */
171         wake_up_all(&q->mq_freeze_wq);
172 }
173 
174 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
175 {
176         return blk_mq_has_free_tags(hctx->tags);
177 }
178 EXPORT_SYMBOL(blk_mq_can_queue);
179 
180 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
181                                struct request *rq, unsigned int rw_flags)
182 {
183         if (blk_queue_io_stat(q))
184                 rw_flags |= REQ_IO_STAT;
185 
186         INIT_LIST_HEAD(&rq->queuelist);
187         /* csd/requeue_work/fifo_time is initialized before use */
188         rq->q = q;
189         rq->mq_ctx = ctx;
190         rq->cmd_flags |= rw_flags;
 191         /* do not touch the atomic flags; they need atomic ops against the timer */
192         rq->cpu = -1;
193         INIT_HLIST_NODE(&rq->hash);
194         RB_CLEAR_NODE(&rq->rb_node);
195         rq->rq_disk = NULL;
196         rq->part = NULL;
197         rq->start_time = jiffies;
198 #ifdef CONFIG_BLK_CGROUP
199         rq->rl = NULL;
200         set_start_time_ns(rq);
201         rq->io_start_time_ns = 0;
202 #endif
203         rq->nr_phys_segments = 0;
204 #if defined(CONFIG_BLK_DEV_INTEGRITY)
205         rq->nr_integrity_segments = 0;
206 #endif
207         rq->special = NULL;
208         /* tag was already set */
209         rq->errors = 0;
210 
211         rq->cmd = rq->__cmd;
212 
213         rq->extra_len = 0;
214         rq->sense_len = 0;
215         rq->resid_len = 0;
216         rq->sense = NULL;
217 
218         INIT_LIST_HEAD(&rq->timeout_list);
219         rq->timeout = 0;
220 
221         rq->end_io = NULL;
222         rq->end_io_data = NULL;
223         rq->next_rq = NULL;
224 
225         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
226 }
227 
228 static struct request *
229 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
230 {
231         struct request *rq;
232         unsigned int tag;
233 
234         tag = blk_mq_get_tag(data);
235         if (tag != BLK_MQ_TAG_FAIL) {
236                 rq = data->hctx->tags->rqs[tag];
237 
238                 if (blk_mq_tag_busy(data->hctx)) {
239                         rq->cmd_flags = REQ_MQ_INFLIGHT;
240                         atomic_inc(&data->hctx->nr_active);
241                 }
242 
243                 rq->tag = tag;
244                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
245                 return rq;
246         }
247 
248         return NULL;
249 }
250 
251 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
252                 bool reserved)
253 {
254         struct blk_mq_ctx *ctx;
255         struct blk_mq_hw_ctx *hctx;
256         struct request *rq;
257         struct blk_mq_alloc_data alloc_data;
258         int ret;
259 
260         ret = blk_mq_queue_enter(q);
261         if (ret)
262                 return ERR_PTR(ret);
263 
264         ctx = blk_mq_get_ctx(q);
265         hctx = q->mq_ops->map_queue(q, ctx->cpu);
266         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
267                         reserved, ctx, hctx);
268 
269         rq = __blk_mq_alloc_request(&alloc_data, rw);
270         if (!rq && (gfp & __GFP_WAIT)) {
271                 __blk_mq_run_hw_queue(hctx);
272                 blk_mq_put_ctx(ctx);
273 
274                 ctx = blk_mq_get_ctx(q);
275                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
276                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
277                                 hctx);
278                 rq = __blk_mq_alloc_request(&alloc_data, rw);
279                 ctx = alloc_data.ctx;
280         }
281         blk_mq_put_ctx(ctx);
282         if (!rq) {
283                 blk_mq_queue_exit(q);
284                 return ERR_PTR(-EWOULDBLOCK);
285         }
286         return rq;
287 }
288 EXPORT_SYMBOL(blk_mq_alloc_request);
289 
290 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
291                                   struct blk_mq_ctx *ctx, struct request *rq)
292 {
293         const int tag = rq->tag;
294         struct request_queue *q = rq->q;
295 
296         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
297                 atomic_dec(&hctx->nr_active);
298         rq->cmd_flags = 0;
299 
300         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
301         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
302         blk_mq_queue_exit(q);
303 }
304 
305 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
306 {
307         struct blk_mq_ctx *ctx = rq->mq_ctx;
308 
309         ctx->rq_completed[rq_is_sync(rq)]++;
310         __blk_mq_free_request(hctx, ctx, rq);
311 
312 }
313 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
314 
315 void blk_mq_free_request(struct request *rq)
316 {
317         struct blk_mq_hw_ctx *hctx;
318         struct request_queue *q = rq->q;
319 
320         hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
321         blk_mq_free_hctx_request(hctx, rq);
322 }
323 EXPORT_SYMBOL_GPL(blk_mq_free_request);
324 
325 inline void __blk_mq_end_request(struct request *rq, int error)
326 {
327         blk_account_io_done(rq);
328 
329         if (rq->end_io) {
330                 rq->end_io(rq, error);
331         } else {
332                 if (unlikely(blk_bidi_rq(rq)))
333                         blk_mq_free_request(rq->next_rq);
334                 blk_mq_free_request(rq);
335         }
336 }
337 EXPORT_SYMBOL(__blk_mq_end_request);
338 
339 void blk_mq_end_request(struct request *rq, int error)
340 {
341         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
342                 BUG();
343         __blk_mq_end_request(rq, error);
344 }
345 EXPORT_SYMBOL(blk_mq_end_request);
346 
347 static void __blk_mq_complete_request_remote(void *data)
348 {
349         struct request *rq = data;
350 
351         rq->q->softirq_done_fn(rq);
352 }
353 
354 static void blk_mq_ipi_complete_request(struct request *rq)
355 {
356         struct blk_mq_ctx *ctx = rq->mq_ctx;
357         bool shared = false;
358         int cpu;
359 
360         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
361                 rq->q->softirq_done_fn(rq);
362                 return;
363         }
364 
365         cpu = get_cpu();
366         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
367                 shared = cpus_share_cache(cpu, ctx->cpu);
368 
369         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
370                 rq->csd.func = __blk_mq_complete_request_remote;
371                 rq->csd.info = rq;
372                 rq->csd.flags = 0;
373                 smp_call_function_single_async(ctx->cpu, &rq->csd);
374         } else {
375                 rq->q->softirq_done_fn(rq);
376         }
377         put_cpu();
378 }
379 
380 void __blk_mq_complete_request(struct request *rq)
381 {
382         struct request_queue *q = rq->q;
383 
384         if (!q->softirq_done_fn)
385                 blk_mq_end_request(rq, rq->errors);
386         else
387                 blk_mq_ipi_complete_request(rq);
388 }
389 
390 /**
391  * blk_mq_complete_request - end I/O on a request
392  * @rq:         the request being processed
393  *
394  * Description:
395  *      Ends all I/O on a request. It does not handle partial completions.
 396  *      The actual completion happens out-of-order, through an IPI handler.
397  **/
398 void blk_mq_complete_request(struct request *rq)
399 {
400         struct request_queue *q = rq->q;
401 
402         if (unlikely(blk_should_fake_timeout(q)))
403                 return;
404         if (!blk_mark_rq_complete(rq))
405                 __blk_mq_complete_request(rq);
406 }
407 EXPORT_SYMBOL(blk_mq_complete_request);
408 
409 int blk_mq_request_started(struct request *rq)
410 {
411         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
412 }
413 EXPORT_SYMBOL_GPL(blk_mq_request_started);
414 
415 void blk_mq_start_request(struct request *rq)
416 {
417         struct request_queue *q = rq->q;
418 
419         trace_block_rq_issue(q, rq);
420 
421         rq->resid_len = blk_rq_bytes(rq);
422         if (unlikely(blk_bidi_rq(rq)))
423                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
424 
425         blk_add_timer(rq);
426 
427         /*
 428          * Ensure that ->deadline is visible before we set the started
429          * flag and clear the completed flag.
430          */
431         smp_mb__before_atomic();
432 
433         /*
434          * Mark us as started and clear complete. Complete might have been
435          * set if requeue raced with timeout, which then marked it as
436          * complete. So be sure to clear complete again when we start
437          * the request, otherwise we'll ignore the completion event.
438          */
439         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
440                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
441         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
442                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
443 
444         if (q->dma_drain_size && blk_rq_bytes(rq)) {
445                 /*
446                  * Make sure space for the drain appears.  We know we can do
447                  * this because max_hw_segments has been adjusted to be one
448                  * fewer than the device can handle.
449                  */
450                 rq->nr_phys_segments++;
451         }
452 }
453 EXPORT_SYMBOL(blk_mq_start_request);
454 
455 static void __blk_mq_requeue_request(struct request *rq)
456 {
457         struct request_queue *q = rq->q;
458 
459         trace_block_rq_requeue(q, rq);
460 
461         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
462                 if (q->dma_drain_size && blk_rq_bytes(rq))
463                         rq->nr_phys_segments--;
464         }
465 }
466 
467 void blk_mq_requeue_request(struct request *rq)
468 {
469         __blk_mq_requeue_request(rq);
470 
471         BUG_ON(blk_queued_rq(rq));
472         blk_mq_add_to_requeue_list(rq, true);
473 }
474 EXPORT_SYMBOL(blk_mq_requeue_request);
475 
476 static void blk_mq_requeue_work(struct work_struct *work)
477 {
478         struct request_queue *q =
479                 container_of(work, struct request_queue, requeue_work);
480         LIST_HEAD(rq_list);
481         struct request *rq, *next;
482         unsigned long flags;
483 
484         spin_lock_irqsave(&q->requeue_lock, flags);
485         list_splice_init(&q->requeue_list, &rq_list);
486         spin_unlock_irqrestore(&q->requeue_lock, flags);
487 
488         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
489                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
490                         continue;
491 
492                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
493                 list_del_init(&rq->queuelist);
494                 blk_mq_insert_request(rq, true, false, false);
495         }
496 
497         while (!list_empty(&rq_list)) {
498                 rq = list_entry(rq_list.next, struct request, queuelist);
499                 list_del_init(&rq->queuelist);
500                 blk_mq_insert_request(rq, false, false, false);
501         }
502 
503         /*
504          * Use the start variant of queue running here, so that running
505          * the requeue work will kick stopped queues.
506          */
507         blk_mq_start_hw_queues(q);
508 }
509 
510 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
511 {
512         struct request_queue *q = rq->q;
513         unsigned long flags;
514 
515         /*
516          * We abuse this flag that is otherwise used by the I/O scheduler to
 517          * request head insertion from the workqueue.
518          */
519         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
520 
521         spin_lock_irqsave(&q->requeue_lock, flags);
522         if (at_head) {
523                 rq->cmd_flags |= REQ_SOFTBARRIER;
524                 list_add(&rq->queuelist, &q->requeue_list);
525         } else {
526                 list_add_tail(&rq->queuelist, &q->requeue_list);
527         }
528         spin_unlock_irqrestore(&q->requeue_lock, flags);
529 }
530 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
531 
532 void blk_mq_cancel_requeue_work(struct request_queue *q)
533 {
534         cancel_work_sync(&q->requeue_work);
535 }
536 EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
537 
538 void blk_mq_kick_requeue_list(struct request_queue *q)
539 {
540         kblockd_schedule_work(&q->requeue_work);
541 }
542 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
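/*
 * A minimal usage sketch of the requeue machinery above (hypothetical
 * driver code): when a request cannot be processed right now, a driver
 * would typically do
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(q);
 *
 * which parks the request on q->requeue_list and schedules
 * blk_mq_requeue_work() to reinsert it and restart the hardware queues.
 */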
543 
544 void blk_mq_abort_requeue_list(struct request_queue *q)
545 {
546         unsigned long flags;
547         LIST_HEAD(rq_list);
548 
549         spin_lock_irqsave(&q->requeue_lock, flags);
550         list_splice_init(&q->requeue_list, &rq_list);
551         spin_unlock_irqrestore(&q->requeue_lock, flags);
552 
553         while (!list_empty(&rq_list)) {
554                 struct request *rq;
555 
556                 rq = list_first_entry(&rq_list, struct request, queuelist);
557                 list_del_init(&rq->queuelist);
558                 rq->errors = -EIO;
559                 blk_mq_end_request(rq, rq->errors);
560         }
561 }
562 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
563 
564 static inline bool is_flush_request(struct request *rq,
565                 struct blk_flush_queue *fq, unsigned int tag)
566 {
567         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
568                         fq->flush_rq->tag == tag);
569 }
570 
571 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
572 {
573         struct request *rq = tags->rqs[tag];
574         /* mq_ctx of flush rq is always cloned from the corresponding req */
575         struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
576 
577         if (!is_flush_request(rq, fq, tag))
578                 return rq;
579 
580         return fq->flush_rq;
581 }
582 EXPORT_SYMBOL(blk_mq_tag_to_rq);
583 
584 struct blk_mq_timeout_data {
585         unsigned long next;
586         unsigned int next_set;
587 };
588 
589 void blk_mq_rq_timed_out(struct request *req, bool reserved)
590 {
591         struct blk_mq_ops *ops = req->q->mq_ops;
592         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
593 
594         /*
595          * We know that complete is set at this point. If STARTED isn't set
596          * anymore, then the request isn't active and the "timeout" should
597          * just be ignored. This can happen due to the bitflag ordering.
598          * Timeout first checks if STARTED is set, and if it is, assumes
599          * the request is active. But if we race with completion, then
 600          * both flags will get cleared. So check here again, and ignore
601          * a timeout event with a request that isn't active.
602          */
603         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
604                 return;
605 
606         if (ops->timeout)
607                 ret = ops->timeout(req, reserved);
608 
609         switch (ret) {
610         case BLK_EH_HANDLED:
611                 __blk_mq_complete_request(req);
612                 break;
613         case BLK_EH_RESET_TIMER:
614                 blk_add_timer(req);
615                 blk_clear_rq_complete(req);
616                 break;
617         case BLK_EH_NOT_HANDLED:
618                 break;
619         default:
620                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
621                 break;
622         }
623 }
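/*
 * A hedged sketch of how a driver's ->timeout callback might interact
 * with the switch above (my_driver_timeout() and my_cmd_still_running()
 * are hypothetical names, not part of any real driver):
 *
 *	static enum blk_eh_timer_return
 *	my_driver_timeout(struct request *rq, bool reserved)
 *	{
 *		if (my_cmd_still_running(rq))
 *			return BLK_EH_RESET_TIMER;	// re-arm blk_add_timer()
 *		rq->errors = -EIO;
 *		return BLK_EH_HANDLED;		// core completes the request
 *	}
 *
 * BLK_EH_NOT_HANDLED would leave completion to the driver's own error
 * handling (e.g. a reset work item).
 */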
624 
625 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
626                 struct request *rq, void *priv, bool reserved)
627 {
628         struct blk_mq_timeout_data *data = priv;
629 
630         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
631                 /*
632                  * If a request wasn't started before the queue was
633                  * marked dying, kill it here or it'll go unnoticed.
634                  */
635                 if (unlikely(blk_queue_dying(rq->q))) {
636                         rq->errors = -EIO;
637                         blk_mq_complete_request(rq);
638                 }
639                 return;
640         }
641         if (rq->cmd_flags & REQ_NO_TIMEOUT)
642                 return;
643 
644         if (time_after_eq(jiffies, rq->deadline)) {
645                 if (!blk_mark_rq_complete(rq))
646                         blk_mq_rq_timed_out(rq, reserved);
647         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
648                 data->next = rq->deadline;
649                 data->next_set = 1;
650         }
651 }
652 
653 static void blk_mq_rq_timer(unsigned long priv)
654 {
655         struct request_queue *q = (struct request_queue *)priv;
656         struct blk_mq_timeout_data data = {
657                 .next           = 0,
658                 .next_set       = 0,
659         };
660         struct blk_mq_hw_ctx *hctx;
661         int i;
662 
663         queue_for_each_hw_ctx(q, hctx, i) {
664                 /*
 665                  * If no software queues are currently mapped to this
666                  * hardware queue, there's nothing to check
667                  */
668                 if (!blk_mq_hw_queue_mapped(hctx))
669                         continue;
670 
671                 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
672         }
673 
674         if (data.next_set) {
675                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
676                 mod_timer(&q->timeout, data.next);
677         } else {
678                 queue_for_each_hw_ctx(q, hctx, i) {
679                         /* the hctx may be unmapped, so check it here */
680                         if (blk_mq_hw_queue_mapped(hctx))
681                                 blk_mq_tag_idle(hctx);
682                 }
683         }
684 }
685 
686 /*
687  * Reverse check our software queue for entries that we could potentially
688  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
689  * too much time checking for merges.
690  */
691 static bool blk_mq_attempt_merge(struct request_queue *q,
692                                  struct blk_mq_ctx *ctx, struct bio *bio)
693 {
694         struct request *rq;
695         int checked = 8;
696 
697         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
698                 int el_ret;
699 
700                 if (!checked--)
701                         break;
702 
703                 if (!blk_rq_merge_ok(rq, bio))
704                         continue;
705 
706                 el_ret = blk_try_merge(rq, bio);
707                 if (el_ret == ELEVATOR_BACK_MERGE) {
708                         if (bio_attempt_back_merge(q, rq, bio)) {
709                                 ctx->rq_merged++;
710                                 return true;
711                         }
712                         break;
713                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
714                         if (bio_attempt_front_merge(q, rq, bio)) {
715                                 ctx->rq_merged++;
716                                 return true;
717                         }
718                         break;
719                 }
720         }
721 
722         return false;
723 }
724 
725 /*
726  * Process software queues that have been marked busy, splicing them
 727  * to the for-dispatch list.
728  */
729 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
730 {
731         struct blk_mq_ctx *ctx;
732         int i;
733 
734         for (i = 0; i < hctx->ctx_map.map_size; i++) {
735                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
736                 unsigned int off, bit;
737 
738                 if (!bm->word)
739                         continue;
740 
741                 bit = 0;
742                 off = i * hctx->ctx_map.bits_per_word;
743                 do {
744                         bit = find_next_bit(&bm->word, bm->depth, bit);
745                         if (bit >= bm->depth)
746                                 break;
747 
748                         ctx = hctx->ctxs[bit + off];
749                         clear_bit(bit, &bm->word);
750                         spin_lock(&ctx->lock);
751                         list_splice_tail_init(&ctx->rq_list, list);
752                         spin_unlock(&ctx->lock);
753 
754                         bit++;
755                 } while (1);
756         }
757 }
758 
759 /*
760  * Run this hardware queue, pulling any software queues mapped to it in.
761  * Note that this function currently has various problems around ordering
762  * of IO. In particular, we'd like FIFO behaviour on handling existing
763  * items on the hctx->dispatch list. Ignore that for now.
764  */
765 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
766 {
767         struct request_queue *q = hctx->queue;
768         struct request *rq;
769         LIST_HEAD(rq_list);
770         LIST_HEAD(driver_list);
771         struct list_head *dptr;
772         int queued;
773 
774         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
775 
776         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
777                 return;
778 
779         hctx->run++;
780 
781         /*
782          * Touch any software queue that has pending entries.
783          */
784         flush_busy_ctxs(hctx, &rq_list);
785 
786         /*
787          * If we have previous entries on our dispatch list, grab them
788          * and stuff them at the front for more fair dispatch.
789          */
790         if (!list_empty_careful(&hctx->dispatch)) {
791                 spin_lock(&hctx->lock);
792                 if (!list_empty(&hctx->dispatch))
793                         list_splice_init(&hctx->dispatch, &rq_list);
794                 spin_unlock(&hctx->lock);
795         }
796 
797         /*
798          * Start off with dptr being NULL, so we start the first request
799          * immediately, even if we have more pending.
800          */
801         dptr = NULL;
802 
803         /*
804          * Now process all the entries, sending them to the driver.
805          */
806         queued = 0;
807         while (!list_empty(&rq_list)) {
808                 struct blk_mq_queue_data bd;
809                 int ret;
810 
811                 rq = list_first_entry(&rq_list, struct request, queuelist);
812                 list_del_init(&rq->queuelist);
813 
814                 bd.rq = rq;
815                 bd.list = dptr;
816                 bd.last = list_empty(&rq_list);
817 
818                 ret = q->mq_ops->queue_rq(hctx, &bd);
819                 switch (ret) {
820                 case BLK_MQ_RQ_QUEUE_OK:
821                         queued++;
822                         continue;
823                 case BLK_MQ_RQ_QUEUE_BUSY:
824                         list_add(&rq->queuelist, &rq_list);
825                         __blk_mq_requeue_request(rq);
826                         break;
827                 default:
828                         pr_err("blk-mq: bad return on queue: %d\n", ret);
829                 case BLK_MQ_RQ_QUEUE_ERROR:
830                         rq->errors = -EIO;
831                         blk_mq_end_request(rq, rq->errors);
832                         break;
833                 }
834 
835                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
836                         break;
837 
838                 /*
839                  * We've done the first request. If we have more than 1
840                  * left in the list, set dptr to defer issue.
841                  */
842                 if (!dptr && rq_list.next != rq_list.prev)
843                         dptr = &driver_list;
844         }
845 
846         if (!queued)
847                 hctx->dispatched[0]++;
848         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
849                 hctx->dispatched[ilog2(queued) + 1]++;
850 
851         /*
852          * Any items that need requeuing? Stuff them into hctx->dispatch,
853          * that is where we will continue on next queue run.
854          */
855         if (!list_empty(&rq_list)) {
856                 spin_lock(&hctx->lock);
857                 list_splice(&rq_list, &hctx->dispatch);
858                 spin_unlock(&hctx->lock);
859         }
860 }
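/*
 * A hedged sketch of the driver side of the dispatch loop above
 * (my_queue_rq() and my_hw_queue_full() are hypothetical names):
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (my_hw_queue_full(hctx))
 *			return BLK_MQ_RQ_QUEUE_BUSY;	// requeued, run stops
 *		blk_mq_start_request(rq);
 *		// ... issue to hardware; bd->last hints end of batch ...
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 * BLK_MQ_RQ_QUEUE_ERROR makes the core above fail the request with -EIO.
 */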
861 
862 /*
863  * It'd be great if the workqueue API had a way to pass
864  * in a mask and had some smarts for more clever placement.
865  * For now we just round-robin here, switching for every
866  * BLK_MQ_CPU_WORK_BATCH queued items.
867  */
868 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
869 {
870         if (hctx->queue->nr_hw_queues == 1)
871                 return WORK_CPU_UNBOUND;
872 
873         if (--hctx->next_cpu_batch <= 0) {
874                 int cpu = hctx->next_cpu, next_cpu;
875 
876                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
877                 if (next_cpu >= nr_cpu_ids)
878                         next_cpu = cpumask_first(hctx->cpumask);
879 
880                 hctx->next_cpu = next_cpu;
881                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
882 
883                 return cpu;
884         }
885 
886         return hctx->next_cpu;
887 }
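/*
 * A worked example of the batching above (hypothetical CPU mask, and
 * assuming BLK_MQ_CPU_WORK_BATCH is 8): with hctx->cpumask == {0, 4} and
 * next_cpu initially 0, the first 8 calls return CPU 0, the next 8
 * return CPU 4, then the selection wraps back to CPU 0, so each mapped
 * CPU runs a batch of queue runs before the work moves on.
 */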
888 
889 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
890 {
891         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
892             !blk_mq_hw_queue_mapped(hctx)))
893                 return;
894 
895         if (!async) {
896                 int cpu = get_cpu();
897                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
898                         __blk_mq_run_hw_queue(hctx);
899                         put_cpu();
900                         return;
901                 }
902 
903                 put_cpu();
904         }
905 
906         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
907                         &hctx->run_work, 0);
908 }
909 
910 static void blk_mq_run_queues(struct request_queue *q)
911 {
912         struct blk_mq_hw_ctx *hctx;
913         int i;
914 
915         queue_for_each_hw_ctx(q, hctx, i) {
916                 if ((!blk_mq_hctx_has_pending(hctx) &&
917                     list_empty_careful(&hctx->dispatch)) ||
918                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
919                         continue;
920 
921                 blk_mq_run_hw_queue(hctx, false);
922         }
923 }
924 
925 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
926 {
927         cancel_delayed_work(&hctx->run_work);
928         cancel_delayed_work(&hctx->delay_work);
929         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
930 }
931 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
932 
933 void blk_mq_stop_hw_queues(struct request_queue *q)
934 {
935         struct blk_mq_hw_ctx *hctx;
936         int i;
937 
938         queue_for_each_hw_ctx(q, hctx, i)
939                 blk_mq_stop_hw_queue(hctx);
940 }
941 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
942 
943 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
944 {
945         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
946 
947         blk_mq_run_hw_queue(hctx, false);
948 }
949 EXPORT_SYMBOL(blk_mq_start_hw_queue);
950 
951 void blk_mq_start_hw_queues(struct request_queue *q)
952 {
953         struct blk_mq_hw_ctx *hctx;
954         int i;
955 
956         queue_for_each_hw_ctx(q, hctx, i)
957                 blk_mq_start_hw_queue(hctx);
958 }
959 EXPORT_SYMBOL(blk_mq_start_hw_queues);
960 
961 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
962 {
963         struct blk_mq_hw_ctx *hctx;
964         int i;
965 
966         queue_for_each_hw_ctx(q, hctx, i) {
967                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
968                         continue;
969 
970                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
971                 blk_mq_run_hw_queue(hctx, async);
972         }
973 }
974 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
975 
976 static void blk_mq_run_work_fn(struct work_struct *work)
977 {
978         struct blk_mq_hw_ctx *hctx;
979 
980         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
981 
982         __blk_mq_run_hw_queue(hctx);
983 }
984 
985 static void blk_mq_delay_work_fn(struct work_struct *work)
986 {
987         struct blk_mq_hw_ctx *hctx;
988 
989         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
990 
991         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
992                 __blk_mq_run_hw_queue(hctx);
993 }
994 
995 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
996 {
997         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
998                 return;
999 
1000         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1001                         &hctx->delay_work, msecs_to_jiffies(msecs));
1002 }
1003 EXPORT_SYMBOL(blk_mq_delay_queue);
1004 
1005 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
1006                                     struct request *rq, bool at_head)
1007 {
1008         struct blk_mq_ctx *ctx = rq->mq_ctx;
1009 
1010         trace_block_rq_insert(hctx->queue, rq);
1011 
1012         if (at_head)
1013                 list_add(&rq->queuelist, &ctx->rq_list);
1014         else
1015                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1016 
1017         blk_mq_hctx_mark_pending(hctx, ctx);
1018 }
1019 
1020 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
1021                 bool async)
1022 {
1023         struct request_queue *q = rq->q;
1024         struct blk_mq_hw_ctx *hctx;
1025         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
1026 
1027         current_ctx = blk_mq_get_ctx(q);
1028         if (!cpu_online(ctx->cpu))
1029                 rq->mq_ctx = ctx = current_ctx;
1030 
1031         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1032 
1033         spin_lock(&ctx->lock);
1034         __blk_mq_insert_request(hctx, rq, at_head);
1035         spin_unlock(&ctx->lock);
1036 
1037         if (run_queue)
1038                 blk_mq_run_hw_queue(hctx, async);
1039 
1040         blk_mq_put_ctx(current_ctx);
1041 }
1042 
1043 static void blk_mq_insert_requests(struct request_queue *q,
1044                                      struct blk_mq_ctx *ctx,
1045                                      struct list_head *list,
1046                                      int depth,
1047                                      bool from_schedule)
1048 
1049 {
1050         struct blk_mq_hw_ctx *hctx;
1051         struct blk_mq_ctx *current_ctx;
1052 
1053         trace_block_unplug(q, depth, !from_schedule);
1054 
1055         current_ctx = blk_mq_get_ctx(q);
1056 
1057         if (!cpu_online(ctx->cpu))
1058                 ctx = current_ctx;
1059         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1060 
1061         /*
1062          * Preemption doesn't flush the plug list, so it's possible that
1063          * ctx->cpu is offline now.
1064          */
1065         spin_lock(&ctx->lock);
1066         while (!list_empty(list)) {
1067                 struct request *rq;
1068 
1069                 rq = list_first_entry(list, struct request, queuelist);
1070                 list_del_init(&rq->queuelist);
1071                 rq->mq_ctx = ctx;
1072                 __blk_mq_insert_request(hctx, rq, false);
1073         }
1074         spin_unlock(&ctx->lock);
1075 
1076         blk_mq_run_hw_queue(hctx, from_schedule);
1077         blk_mq_put_ctx(current_ctx);
1078 }
1079 
1080 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1081 {
1082         struct request *rqa = container_of(a, struct request, queuelist);
1083         struct request *rqb = container_of(b, struct request, queuelist);
1084 
1085         return !(rqa->mq_ctx < rqb->mq_ctx ||
1086                  (rqa->mq_ctx == rqb->mq_ctx &&
1087                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1088 }
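/*
 * The comparison above sorts the plug list first by software queue
 * (mq_ctx pointer) and then by starting sector within each queue. For
 * example (hypothetical requests), a plug list of (ctxB, sector 20),
 * (ctxA, sector 100), (ctxA, sector 50) sorts to (ctxA, 50),
 * (ctxA, 100), (ctxB, 20), assuming ctxA's pointer compares lower; this
 * lets blk_mq_flush_plug_list() below hand each ctx's run to
 * blk_mq_insert_requests() in one batch.
 */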
1089 
1090 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1091 {
1092         struct blk_mq_ctx *this_ctx;
1093         struct request_queue *this_q;
1094         struct request *rq;
1095         LIST_HEAD(list);
1096         LIST_HEAD(ctx_list);
1097         unsigned int depth;
1098 
1099         list_splice_init(&plug->mq_list, &list);
1100 
1101         list_sort(NULL, &list, plug_ctx_cmp);
1102 
1103         this_q = NULL;
1104         this_ctx = NULL;
1105         depth = 0;
1106 
1107         while (!list_empty(&list)) {
1108                 rq = list_entry_rq(list.next);
1109                 list_del_init(&rq->queuelist);
1110                 BUG_ON(!rq->q);
1111                 if (rq->mq_ctx != this_ctx) {
1112                         if (this_ctx) {
1113                                 blk_mq_insert_requests(this_q, this_ctx,
1114                                                         &ctx_list, depth,
1115                                                         from_schedule);
1116                         }
1117 
1118                         this_ctx = rq->mq_ctx;
1119                         this_q = rq->q;
1120                         depth = 0;
1121                 }
1122 
1123                 depth++;
1124                 list_add_tail(&rq->queuelist, &ctx_list);
1125         }
1126 
1127         /*
1128          * If 'this_ctx' is set, we know we have entries to complete
1129          * on 'ctx_list'. Do those.
1130          */
1131         if (this_ctx) {
1132                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1133                                        from_schedule);
1134         }
1135 }
1136 
1137 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1138 {
1139         init_request_from_bio(rq, bio);
1140 
1141         if (blk_do_io_stat(rq))
1142                 blk_account_io_start(rq, 1);
1143 }
1144 
1145 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1146 {
1147         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1148                 !blk_queue_nomerges(hctx->queue);
1149 }
1150 
1151 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1152                                          struct blk_mq_ctx *ctx,
1153                                          struct request *rq, struct bio *bio)
1154 {
1155         if (!hctx_allow_merges(hctx)) {
1156                 blk_mq_bio_to_request(rq, bio);
1157                 spin_lock(&ctx->lock);
1158 insert_rq:
1159                 __blk_mq_insert_request(hctx, rq, false);
1160                 spin_unlock(&ctx->lock);
1161                 return false;
1162         } else {
1163                 struct request_queue *q = hctx->queue;
1164 
1165                 spin_lock(&ctx->lock);
1166                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1167                         blk_mq_bio_to_request(rq, bio);
1168                         goto insert_rq;
1169                 }
1170 
1171                 spin_unlock(&ctx->lock);
1172                 __blk_mq_free_request(hctx, ctx, rq);
1173                 return true;
1174         }
1175 }
1176 
1177 struct blk_map_ctx {
1178         struct blk_mq_hw_ctx *hctx;
1179         struct blk_mq_ctx *ctx;
1180 };
1181 
1182 static struct request *blk_mq_map_request(struct request_queue *q,
1183                                           struct bio *bio,
1184                                           struct blk_map_ctx *data)
1185 {
1186         struct blk_mq_hw_ctx *hctx;
1187         struct blk_mq_ctx *ctx;
1188         struct request *rq;
1189         int rw = bio_data_dir(bio);
1190         struct blk_mq_alloc_data alloc_data;
1191 
1192         if (unlikely(blk_mq_queue_enter(q))) {
1193                 bio_endio(bio, -EIO);
1194                 return NULL;
1195         }
1196 
1197         ctx = blk_mq_get_ctx(q);
1198         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1199 
1200         if (rw_is_sync(bio->bi_rw))
1201                 rw |= REQ_SYNC;
1202 
1203         trace_block_getrq(q, bio, rw);
1204         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1205                         hctx);
1206         rq = __blk_mq_alloc_request(&alloc_data, rw);
1207         if (unlikely(!rq)) {
1208                 __blk_mq_run_hw_queue(hctx);
1209                 blk_mq_put_ctx(ctx);
1210                 trace_block_sleeprq(q, bio, rw);
1211 
1212                 ctx = blk_mq_get_ctx(q);
1213                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1214                 blk_mq_set_alloc_data(&alloc_data, q,
1215                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1216                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1217                 ctx = alloc_data.ctx;
1218                 hctx = alloc_data.hctx;
1219         }
1220 
1221         hctx->queued++;
1222         data->hctx = hctx;
1223         data->ctx = ctx;
1224         return rq;
1225 }
1226 
1227 /*
1228  * Multiple hardware queue variant. This will not use per-process plugs,
1229  * but will attempt to bypass the hctx queueing if we can go straight to
1230  * hardware for SYNC IO.
1231  */
1232 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1233 {
1234         const int is_sync = rw_is_sync(bio->bi_rw);
1235         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1236         struct blk_map_ctx data;
1237         struct request *rq;
1238 
1239         blk_queue_bounce(q, &bio);
1240 
1241         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1242                 bio_endio(bio, -EIO);
1243                 return;
1244         }
1245 
1246         rq = blk_mq_map_request(q, bio, &data);
1247         if (unlikely(!rq))
1248                 return;
1249 
1250         if (unlikely(is_flush_fua)) {
1251                 blk_mq_bio_to_request(rq, bio);
1252                 blk_insert_flush(rq);
1253                 goto run_queue;
1254         }
1255 
1256         /*
1257          * If the driver supports deferred issue based on 'last', then
1258          * queue it up like normal, since we can potentially save some
1259          * CPU this way.
1260          */
1261         if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1262                 struct blk_mq_queue_data bd = {
1263                         .rq = rq,
1264                         .list = NULL,
1265                         .last = 1
1266                 };
1267                 int ret;
1268 
1269                 blk_mq_bio_to_request(rq, bio);
1270 
1271                 /*
1272                  * If queueing succeeded, we are done. On error, kill the
1273                  * request. Anything else (busy) is added back to our list,
1274                  * as we previously would have done.
1275                  */
1276                 ret = q->mq_ops->queue_rq(data.hctx, &bd);
1277                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1278                         goto done;
1279                 else {
1280                         __blk_mq_requeue_request(rq);
1281 
1282                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1283                                 rq->errors = -EIO;
1284                                 blk_mq_end_request(rq, rq->errors);
1285                                 goto done;
1286                         }
1287                 }
1288         }
1289 
1290         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1291                 /*
1292                  * For a SYNC request, send it to the hardware immediately. For
1293                  * an ASYNC request, just ensure that we run it later on. The
1294                  * latter allows for merging opportunities and more efficient
1295                  * dispatching.
1296                  */
1297 run_queue:
1298                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1299         }
1300 done:
1301         blk_mq_put_ctx(data.ctx);
1302 }
1303 
1304 /*
1305  * Single hardware queue variant. This will attempt to use any per-process
1306  * plug for merging and IO deferral.
1307  */
1308 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1309 {
1310         const int is_sync = rw_is_sync(bio->bi_rw);
1311         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1312         unsigned int use_plug, request_count = 0;
1313         struct blk_map_ctx data;
1314         struct request *rq;
1315 
1316         /*
1317          * Use the per-process plug only for async, non-flush IO; sync
1318          * IO is sent straight to the hardware queue.
1319          */
1320         use_plug = !is_flush_fua && !is_sync;
1321 
1322         blk_queue_bounce(q, &bio);
1323 
1324         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1325                 bio_endio(bio, -EIO);
1326                 return;
1327         }
1328 
1329         if (use_plug && !blk_queue_nomerges(q) &&
1330             blk_attempt_plug_merge(q, bio, &request_count))
1331                 return;
1332 
1333         rq = blk_mq_map_request(q, bio, &data);
1334         if (unlikely(!rq))
1335                 return;
1336 
1337         if (unlikely(is_flush_fua)) {
1338                 blk_mq_bio_to_request(rq, bio);
1339                 blk_insert_flush(rq);
1340                 goto run_queue;
1341         }
1342 
1343         /*
1344          * If a task plug currently exists, utilize it to temporarily
1345          * store requests until the task is either done or scheduled
1346          * away. This is completely lockless.
1347          */
1348         if (use_plug) {
1349                 struct blk_plug *plug = current->plug;
1350 
1351                 if (plug) {
1352                         blk_mq_bio_to_request(rq, bio);
1353                         if (list_empty(&plug->mq_list))
1354                                 trace_block_plug(q);
1355                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1356                                 blk_flush_plug_list(plug, false);
1357                                 trace_block_plug(q);
1358                         }
1359                         list_add_tail(&rq->queuelist, &plug->mq_list);
1360                         blk_mq_put_ctx(data.ctx);
1361                         return;
1362                 }
1363         }
1364 
1365         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1366                 /*
1367                  * For a SYNC request, send it to the hardware immediately. For
1368                  * an ASYNC request, just ensure that we run it later on. The
1369                  * latter allows for merging opportunities and more efficient
1370                  * dispatching.
1371                  */
1372 run_queue:
1373                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1374         }
1375 
1376         blk_mq_put_ctx(data.ctx);
1377 }
1378 
1379 /*
1380  * Default mapping to a software queue, since we use one per CPU.
1381  */
1382 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1383 {
1384         return q->queue_hw_ctx[q->mq_map[cpu]];
1385 }
1386 EXPORT_SYMBOL(blk_mq_map_queue);
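/*
 * For example (hypothetical mapping), on an 8-CPU system with two
 * hardware queues, q->mq_map[] might map CPUs 0-3 to hw queue 0 and
 * CPUs 4-7 to hw queue 1, so blk_mq_map_queue(q, 5) returns
 * q->queue_hw_ctx[1].
 */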
1387 
1388 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1389                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1390 {
1391         struct page *page;
1392 
1393         if (tags->rqs && set->ops->exit_request) {
1394                 int i;
1395 
1396                 for (i = 0; i < tags->nr_tags; i++) {
1397                         if (!tags->rqs[i])
1398                                 continue;
1399                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1400                                                 hctx_idx, i);
1401                         tags->rqs[i] = NULL;
1402                 }
1403         }
1404 
1405         while (!list_empty(&tags->page_list)) {
1406                 page = list_first_entry(&tags->page_list, struct page, lru);
1407                 list_del_init(&page->lru);
1408                 __free_pages(page, page->private);
1409         }
1410 
1411         kfree(tags->rqs);
1412 
1413         blk_mq_free_tags(tags);
1414 }
1415 
1416 static size_t order_to_size(unsigned int order)
1417 {
1418         return (size_t)PAGE_SIZE << order;
1419 }
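/*
 * With a (hypothetical but common) 4KB PAGE_SIZE, order_to_size(4) is
 * 64KB. blk_mq_init_rq_map() below starts at that order and drops the
 * order on allocation failure, giving up once a chunk could no longer
 * hold even a single rq_size-d request.
 */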
1420 
1421 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1422                 unsigned int hctx_idx)
1423 {
1424         struct blk_mq_tags *tags;
1425         unsigned int i, j, entries_per_page, max_order = 4;
1426         size_t rq_size, left;
1427 
1428         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1429                                 set->numa_node,
1430                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1431         if (!tags)
1432                 return NULL;
1433 
1434         INIT_LIST_HEAD(&tags->page_list);
1435 
1436         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1437                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1438                                  set->numa_node);
1439         if (!tags->rqs) {
1440                 blk_mq_free_tags(tags);
1441                 return NULL;
1442         }
1443 
1444         /*
1445          * rq_size is the size of the request plus driver payload, rounded
1446          * to the cacheline size
1447          */
1448         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1449                                 cache_line_size());
1450         left = rq_size * set->queue_depth;
1451 
1452         for (i = 0; i < set->queue_depth; ) {
1453                 int this_order = max_order;
1454                 struct page *page;
1455                 int to_do;
1456                 void *p;
1457 
1458                 while (left < order_to_size(this_order - 1) && this_order)
1459                         this_order--;
1460 
1461                 do {
1462                         page = alloc_pages_node(set->numa_node,
1463                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1464                                 this_order);
1465                         if (page)
1466                                 break;
1467                         if (!this_order--)
1468                                 break;
1469                         if (order_to_size(this_order) < rq_size)
1470                                 break;
1471                 } while (1);
1472 
1473                 if (!page)
1474                         goto fail;
1475 
1476                 page->private = this_order;
1477                 list_add_tail(&page->lru, &tags->page_list);
1478 
1479                 p = page_address(page);
1480                 entries_per_page = order_to_size(this_order) / rq_size;
1481                 to_do = min(entries_per_page, set->queue_depth - i);
1482                 left -= to_do * rq_size;
1483                 for (j = 0; j < to_do; j++) {
1484                         tags->rqs[i] = p;
1485                         if (set->ops->init_request) {
1486                                 if (set->ops->init_request(set->driver_data,
1487                                                 tags->rqs[i], hctx_idx, i,
1488                                                 set->numa_node)) {
1489                                         tags->rqs[i] = NULL;
1490                                         goto fail;
1491                                 }
1492                         }
1493 
1494                         p += rq_size;
1495                         i++;
1496                 }
1497         }
1498 
1499         return tags;
1500 
1501 fail:
1502         blk_mq_free_rq_map(set, tags, hctx_idx);
1503         return NULL;
1504 }
1505 
1506 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1507 {
1508         kfree(bitmap->map);
1509 }
1510 
1511 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1512 {
1513         unsigned int bpw = 8, total, num_maps, i;
1514 
1515         bitmap->bits_per_word = bpw;
1516 
1517         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1518         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1519                                         GFP_KERNEL, node);
1520         if (!bitmap->map)
1521                 return -ENOMEM;
1522 
1523         bitmap->map_size = num_maps;
1524 
1525         total = nr_cpu_ids;
1526         for (i = 0; i < num_maps; i++) {
1527                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1528                 total -= bitmap->map[i].depth;
1529         }
1530 
1531         return 0;
1532 }
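/*
 * A worked example of the sizing above: with nr_cpu_ids == 12 and
 * bits_per_word == 8, num_maps = ALIGN(12, 8) / 8 == 2, and the two
 * words get depths of 8 and 4, so every possible software queue index
 * has exactly one bit in hctx->ctx_map.
 */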
1533 
1534 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1535 {
1536         struct request_queue *q = hctx->queue;
1537         struct blk_mq_ctx *ctx;
1538         LIST_HEAD(tmp);
1539 
1540         /*
1541          * Move ctx entries to new CPU, if this one is going away.
1542          */
1543         ctx = __blk_mq_get_ctx(q, cpu);
1544 
1545         spin_lock(&ctx->lock);
1546         if (!list_empty(&ctx->rq_list)) {
1547                 list_splice_init(&ctx->rq_list, &tmp);
1548                 blk_mq_hctx_clear_pending(hctx, ctx);
1549         }
1550         spin_unlock(&ctx->lock);
1551 
1552         if (list_empty(&tmp))
1553                 return NOTIFY_OK;
1554 
1555         ctx = blk_mq_get_ctx(q);
1556         spin_lock(&ctx->lock);
1557 
1558         while (!list_empty(&tmp)) {
1559                 struct request *rq;
1560 
1561                 rq = list_first_entry(&tmp, struct request, queuelist);
1562                 rq->mq_ctx = ctx;
1563                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1564         }
1565 
1566         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1567         blk_mq_hctx_mark_pending(hctx, ctx);
1568 
1569         spin_unlock(&ctx->lock);
1570 
1571         blk_mq_run_hw_queue(hctx, true);
1572         blk_mq_put_ctx(ctx);
1573         return NOTIFY_OK;
1574 }
1575 
1576 static int blk_mq_hctx_notify(void *data, unsigned long action,
1577                               unsigned int cpu)
1578 {
1579         struct blk_mq_hw_ctx *hctx = data;
1580 
1581         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1582                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1583 
1584         /*
1585          * In case of CPU online, tags may be reallocated
1586          * in blk_mq_map_swqueue() after mapping is updated.
1587          */
1588 
1589         return NOTIFY_OK;
1590 }
1591 
1592 /* hctx->ctxs will be freed in queue's release handler */
1593 static void blk_mq_exit_hctx(struct request_queue *q,
1594                 struct blk_mq_tag_set *set,
1595                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1596 {
1597         unsigned flush_start_tag = set->queue_depth;
1598 
1599         blk_mq_tag_idle(hctx);
1600 
1601         if (set->ops->exit_request)
1602                 set->ops->exit_request(set->driver_data,
1603                                        hctx->fq->flush_rq, hctx_idx,
1604                                        flush_start_tag + hctx_idx);
1605 
1606         if (set->ops->exit_hctx)
1607                 set->ops->exit_hctx(hctx, hctx_idx);
1608 
1609         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1610         blk_free_flush_queue(hctx->fq);
1611         blk_mq_free_bitmap(&hctx->ctx_map);
1612 }
1613 
1614 static void blk_mq_exit_hw_queues(struct request_queue *q,
1615                 struct blk_mq_tag_set *set, int nr_queue)
1616 {
1617         struct blk_mq_hw_ctx *hctx;
1618         unsigned int i;
1619 
1620         queue_for_each_hw_ctx(q, hctx, i) {
1621                 if (i == nr_queue)
1622                         break;
1623                 blk_mq_exit_hctx(q, set, hctx, i);
1624         }
1625 }
1626 
1627 static void blk_mq_free_hw_queues(struct request_queue *q,
1628                 struct blk_mq_tag_set *set)
1629 {
1630         struct blk_mq_hw_ctx *hctx;
1631         unsigned int i;
1632 
1633         queue_for_each_hw_ctx(q, hctx, i)
1634                 free_cpumask_var(hctx->cpumask);
1635 }
1636 
1637 static int blk_mq_init_hctx(struct request_queue *q,
1638                 struct blk_mq_tag_set *set,
1639                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1640 {
1641         int node;
1642         unsigned flush_start_tag = set->queue_depth;
1643 
1644         node = hctx->numa_node;
1645         if (node == NUMA_NO_NODE)
1646                 node = hctx->numa_node = set->numa_node;
1647 
1648         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1649         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1650         spin_lock_init(&hctx->lock);
1651         INIT_LIST_HEAD(&hctx->dispatch);
1652         hctx->queue = q;
1653         hctx->queue_num = hctx_idx;
1654         hctx->flags = set->flags;
1655 
1656         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1657                                         blk_mq_hctx_notify, hctx);
1658         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1659 
1660         hctx->tags = set->tags[hctx_idx];
1661 
1662         /*
1663          * Allocate space for all possible cpus to avoid allocation at
1664          * runtime
1665          */
1666         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1667                                         GFP_KERNEL, node);
1668         if (!hctx->ctxs)
1669                 goto unregister_cpu_notifier;
1670 
1671         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1672                 goto free_ctxs;
1673 
1674         hctx->nr_ctx = 0;
1675 
1676         if (set->ops->init_hctx &&
1677             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1678                 goto free_bitmap;
1679 
1680         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1681         if (!hctx->fq)
1682                 goto exit_hctx;
1683 
1684         if (set->ops->init_request &&
1685             set->ops->init_request(set->driver_data,
1686                                    hctx->fq->flush_rq, hctx_idx,
1687                                    flush_start_tag + hctx_idx, node))
1688                 goto free_fq;
1689 
1690         return 0;
1691 
1692  free_fq:
1693         blk_free_flush_queue(hctx->fq);
1694  exit_hctx:
1695         if (set->ops->exit_hctx)
1696                 set->ops->exit_hctx(hctx, hctx_idx);
1697  free_bitmap:
1698         blk_mq_free_bitmap(&hctx->ctx_map);
1699  free_ctxs:
1700         kfree(hctx->ctxs);
1701  unregister_cpu_notifier:
1702         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1703 
1704         return -1;
1705 }
1706 
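blk_mq_init_hctx() above follows the usual kernel error-unwind idiom: every successful step gains a label, and a later failure jumps to the label that releases everything acquired so far, in reverse order. A minimal stand-alone sketch of the same shape, using hypothetical resources and plain malloc()/free() instead of the block-layer helpers:

#include <stdlib.h>

/* Hypothetical two-resource setup using the same goto-unwind shape. */
int setup_pair(void **a, void **b)
{
        *a = malloc(64);
        if (!*a)
                goto fail;

        *b = malloc(64);
        if (!*b)
                goto free_a;

        return 0;

free_a:
        free(*a);               /* undo the first step */
        *a = NULL;
fail:
        return -1;              /* mirrors the bare -1 returned above */
}

int main(void)
{
        void *a, *b;

        if (setup_pair(&a, &b) == 0) {
                free(b);
                free(a);
        }
        return 0;
}
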
1707 static int blk_mq_init_hw_queues(struct request_queue *q,
1708                 struct blk_mq_tag_set *set)
1709 {
1710         struct blk_mq_hw_ctx *hctx;
1711         unsigned int i;
1712 
1713         /*
1714          * Initialize hardware queues
1715          */
1716         queue_for_each_hw_ctx(q, hctx, i) {
1717                 if (blk_mq_init_hctx(q, set, hctx, i))
1718                         break;
1719         }
1720 
1721         if (i == q->nr_hw_queues)
1722                 return 0;
1723 
1724         /*
1725          * Init failed
1726          */
1727         blk_mq_exit_hw_queues(q, set, i);
1728 
1729         return 1;
1730 }
1731 
1732 static void blk_mq_init_cpu_queues(struct request_queue *q,
1733                                    unsigned int nr_hw_queues)
1734 {
1735         unsigned int i;
1736 
1737         for_each_possible_cpu(i) {
1738                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1739                 struct blk_mq_hw_ctx *hctx;
1740 
1741                 memset(__ctx, 0, sizeof(*__ctx));
1742                 __ctx->cpu = i;
1743                 spin_lock_init(&__ctx->lock);
1744                 INIT_LIST_HEAD(&__ctx->rq_list);
1745                 __ctx->queue = q;
1746 
1747                 /* If the cpu isn't online, the cpu is mapped to first hctx */
1748                 if (!cpu_online(i))
1749                         continue;
1750 
1751                 hctx = q->mq_ops->map_queue(q, i);
1752                 cpumask_set_cpu(i, hctx->cpumask);
1753                 hctx->nr_ctx++;
1754 
1755                 /*
1756                  * Set local node, IFF we have more than one hw queue. If
1757                  * not, we remain on the home node of the device
1758                  */
1759                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1760                         hctx->numa_node = cpu_to_node(i);
1761         }
1762 }
1763 
1764 static void blk_mq_map_swqueue(struct request_queue *q)
1765 {
1766         unsigned int i;
1767         struct blk_mq_hw_ctx *hctx;
1768         struct blk_mq_ctx *ctx;
1769         struct blk_mq_tag_set *set = q->tag_set;
1770 
1771         queue_for_each_hw_ctx(q, hctx, i) {
1772                 cpumask_clear(hctx->cpumask);
1773                 hctx->nr_ctx = 0;
1774         }
1775 
1776         /*
1777          * Map software to hardware queues
1778          */
1779         queue_for_each_ctx(q, ctx, i) {
1780                 /* If the cpu isn't online, the cpu is mapped to first hctx */
1781                 if (!cpu_online(i))
1782                         continue;
1783 
1784                 hctx = q->mq_ops->map_queue(q, i);
1785                 cpumask_set_cpu(i, hctx->cpumask);
1786                 ctx->index_hw = hctx->nr_ctx;
1787                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1788         }
1789 
1790         queue_for_each_hw_ctx(q, hctx, i) {
1791                 /*
1792                  * If no software queues are mapped to this hardware queue,
1793                  * disable it and free the request entries.
1794                  */
1795                 if (!hctx->nr_ctx) {
1796                         if (set->tags[i]) {
1797                                 blk_mq_free_rq_map(set, set->tags[i], i);
1798                                 set->tags[i] = NULL;
1799                         }
1800                         hctx->tags = NULL;
1801                         continue;
1802                 }
1803 
1804                 /* an unmapped hw queue can be remapped after the CPU topology changes */
1805                 if (!set->tags[i])
1806                         set->tags[i] = blk_mq_init_rq_map(set, i);
1807                 hctx->tags = set->tags[i];
1808                 WARN_ON(!hctx->tags);
1809 
1810                 /*
1811                  * Initialize batch roundrobin counts
1812                  */
1813                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1814                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1815         }
1816 }
1817 
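To make the bookkeeping in blk_mq_map_swqueue() concrete, here is a hedged toy model. It is not the kernel's mapping function: it simply assumes a cpu % nr_hw_queues rule standing in for map_queue and shows how each online software context receives an index_hw equal to its slot in its hardware queue's ctxs[] array.

#include <stdio.h>

#define NR_CPUS      4          /* assumed online CPUs      */
#define NR_HW_QUEUES 2          /* assumed hardware queues  */

int main(void)
{
        unsigned int nr_ctx[NR_HW_QUEUES] = { 0 };
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                unsigned int hw = cpu % NR_HW_QUEUES;   /* toy map_queue() */
                unsigned int index_hw = nr_ctx[hw]++;   /* slot in ctxs[]  */

                printf("cpu %u -> hctx %u, index_hw %u\n", cpu, hw, index_hw);
        }
        /* Prints: cpu 0 -> hctx 0, index_hw 0; cpu 1 -> hctx 1, index_hw 0;
         * cpu 2 -> hctx 0, index_hw 1; cpu 3 -> hctx 1, index_hw 1. */
        return 0;
}
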
1818 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1819 {
1820         struct blk_mq_hw_ctx *hctx;
1821         struct request_queue *q;
1822         bool shared;
1823         int i;
1824 
1825         if (set->tag_list.next == set->tag_list.prev)
1826                 shared = false;
1827         else
1828                 shared = true;
1829 
1830         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1831                 blk_mq_freeze_queue(q);
1832 
1833                 queue_for_each_hw_ctx(q, hctx, i) {
1834                         if (shared)
1835                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1836                         else
1837                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1838                 }
1839                 blk_mq_unfreeze_queue(q);
1840         }
1841 }
1842 
1843 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1844 {
1845         struct blk_mq_tag_set *set = q->tag_set;
1846 
1847         mutex_lock(&set->tag_list_lock);
1848         list_del_init(&q->tag_set_list);
1849         blk_mq_update_tag_set_depth(set);
1850         mutex_unlock(&set->tag_list_lock);
1851 }
1852 
1853 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1854                                      struct request_queue *q)
1855 {
1856         q->tag_set = set;
1857 
1858         mutex_lock(&set->tag_list_lock);
1859         list_add_tail(&q->tag_set_list, &set->tag_list);
1860         blk_mq_update_tag_set_depth(set);
1861         mutex_unlock(&set->tag_list_lock);
1862 }
1863 
1864 /*
1865  * This is the actual release handler for mq, but we run it from the
1866  * request queue's release handler to avoid use-after-free problems:
1867  * q->mq_kobj arguably should not have been introduced, but we cannot
1868  * group the ctx and hctx kobjects without it.
1869  */
1870 void blk_mq_release(struct request_queue *q)
1871 {
1872         struct blk_mq_hw_ctx *hctx;
1873         unsigned int i;
1874 
1875         /* hctx kobj stays in hctx */
1876         queue_for_each_hw_ctx(q, hctx, i) {
1877                 if (!hctx)
1878                         continue;
1879                 kfree(hctx->ctxs);
1880                 kfree(hctx);
1881         }
1882 
1883         kfree(q->queue_hw_ctx);
1884 
1885         /* ctx kobj stays in queue_ctx */
1886         free_percpu(q->queue_ctx);
1887 }
1888 
1889 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1890 {
1891         struct blk_mq_hw_ctx **hctxs;
1892         struct blk_mq_ctx __percpu *ctx;
1893         struct request_queue *q;
1894         unsigned int *map;
1895         int i;
1896 
1897         ctx = alloc_percpu(struct blk_mq_ctx);
1898         if (!ctx)
1899                 return ERR_PTR(-ENOMEM);
1900 
1901         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1902                         set->numa_node);
1903 
1904         if (!hctxs)
1905                 goto err_percpu;
1906 
1907         map = blk_mq_make_queue_map(set);
1908         if (!map)
1909                 goto err_map;
1910 
1911         for (i = 0; i < set->nr_hw_queues; i++) {
1912                 int node = blk_mq_hw_queue_to_node(map, i);
1913 
1914                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1915                                         GFP_KERNEL, node);
1916                 if (!hctxs[i])
1917                         goto err_hctxs;
1918 
1919                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1920                                                 node))
1921                         goto err_hctxs;
1922 
1923                 atomic_set(&hctxs[i]->nr_active, 0);
1924                 hctxs[i]->numa_node = node;
1925                 hctxs[i]->queue_num = i;
1926         }
1927 
1928         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1929         if (!q)
1930                 goto err_hctxs;
1931 
1932         /*
1933          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1934          * See blk_register_queue() for details.
1935          */
1936         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1937                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1938                 goto err_mq_usage;
1939 
1940         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1941         blk_queue_rq_timeout(q, 30000);
1942 
1943         q->nr_queues = nr_cpu_ids;
1944         q->nr_hw_queues = set->nr_hw_queues;
1945         q->mq_map = map;
1946 
1947         q->queue_ctx = ctx;
1948         q->queue_hw_ctx = hctxs;
1949 
1950         q->mq_ops = set->ops;
1951         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1952 
1953         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1954                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1955 
1956         q->sg_reserved_size = INT_MAX;
1957 
1958         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1959         INIT_LIST_HEAD(&q->requeue_list);
1960         spin_lock_init(&q->requeue_lock);
1961 
1962         if (q->nr_hw_queues > 1)
1963                 blk_queue_make_request(q, blk_mq_make_request);
1964         else
1965                 blk_queue_make_request(q, blk_sq_make_request);
1966 
1967         if (set->timeout)
1968                 blk_queue_rq_timeout(q, set->timeout);
1969 
1970         /*
1971          * Do this after blk_queue_make_request() overrides it...
1972          */
1973         q->nr_requests = set->queue_depth;
1974 
1975         if (set->ops->complete)
1976                 blk_queue_softirq_done(q, set->ops->complete);
1977 
1978         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1979 
1980         if (blk_mq_init_hw_queues(q, set))
1981                 goto err_mq_usage;
1982 
1983         mutex_lock(&all_q_mutex);
1984         list_add_tail(&q->all_q_node, &all_q_list);
1985         mutex_unlock(&all_q_mutex);
1986 
1987         blk_mq_add_queue_tag_set(set, q);
1988 
1989         blk_mq_map_swqueue(q);
1990 
1991         return q;
1992 
1993 err_mq_usage:
1994         blk_cleanup_queue(q);
1995 err_hctxs:
1996         kfree(map);
1997         for (i = 0; i < set->nr_hw_queues; i++) {
1998                 if (!hctxs[i])
1999                         break;
2000                 free_cpumask_var(hctxs[i]->cpumask);
2001                 kfree(hctxs[i]);
2002         }
2003 err_map:
2004         kfree(hctxs);
2005 err_percpu:
2006         free_percpu(ctx);
2007         return ERR_PTR(-ENOMEM);
2008 }
2009 EXPORT_SYMBOL(blk_mq_init_queue);
2010 
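One detail worth noting for callers: on failure blk_mq_init_queue() returns ERR_PTR(-ENOMEM), not NULL, so the result must be tested with IS_ERR()/PTR_ERR(). The snippet below is hypothetical caller-side glue (the mydev names are invented), shown only to illustrate that check; a fuller set-up/tear-down outline follows blk_mq_free_tag_set() further down.

/* Hypothetical caller; "mydev" is not a real driver. */
#include <linux/blk-mq.h>
#include <linux/err.h>

struct mydev {
        struct blk_mq_tag_set tag_set;
        struct request_queue *queue;
};

static int mydev_init_queue(struct mydev *dev)
{
        struct request_queue *q;

        q = blk_mq_init_queue(&dev->tag_set);
        if (IS_ERR(q))                  /* failure is ERR_PTR(), never NULL */
                return PTR_ERR(q);

        dev->queue = q;
        return 0;
}
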
2011 void blk_mq_free_queue(struct request_queue *q)
2012 {
2013         struct blk_mq_tag_set   *set = q->tag_set;
2014 
2015         blk_mq_del_queue_tag_set(q);
2016 
2017         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2018         blk_mq_free_hw_queues(q, set);
2019 
2020         percpu_ref_exit(&q->mq_usage_counter);
2021 
2022         kfree(q->mq_map);
2023 
2024         q->mq_map = NULL;
2025 
2026         mutex_lock(&all_q_mutex);
2027         list_del_init(&q->all_q_node);
2028         mutex_unlock(&all_q_mutex);
2029 }
2030 
2031 /* Basically redo blk_mq_init_queue with queue frozen */
2032 static void blk_mq_queue_reinit(struct request_queue *q)
2033 {
2034         WARN_ON_ONCE(!q->mq_freeze_depth);
2035 
2036         blk_mq_sysfs_unregister(q);
2037 
2038         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
2039 
2040         /*
2041          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2042          * we should change hctx numa_node according to new topology (this
2043          * involves freeing and re-allocating memory; is it worth doing?)
2044          */
2045 
2046         blk_mq_map_swqueue(q);
2047 
2048         blk_mq_sysfs_register(q);
2049 }
2050 
2051 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2052                                       unsigned long action, void *hcpu)
2053 {
2054         struct request_queue *q;
2055 
2056         /*
2057          * Before new mappings are established, a hot-added CPU might already
2058          * start handling requests. This doesn't break anything, as we map
2059          * offline CPUs to the first hardware queue. We will re-init the queue
2060          * below to get optimal settings.
2061          */
2062         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
2063             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
2064                 return NOTIFY_OK;
2065 
2066         mutex_lock(&all_q_mutex);
2067 
2068         /*
2069          * We need to freeze and reinit all existing queues.  Freezing
2070          * involves synchronous wait for an RCU grace period and doing it
2071          * one by one may take a long time.  Start freezing all queues in
2072          * one swoop and then wait for the completions so that freezing can
2073          * take place in parallel.
2074          */
2075         list_for_each_entry(q, &all_q_list, all_q_node)
2076                 blk_mq_freeze_queue_start(q);
2077         list_for_each_entry(q, &all_q_list, all_q_node) {
2078                 blk_mq_freeze_queue_wait(q);
2079 
2080                 /*
2081                  * timeout handler can't touch hw queue during the
2082                  * reinitialization
2083                  */
2084                 del_timer_sync(&q->timeout);
2085         }
2086 
2087         list_for_each_entry(q, &all_q_list, all_q_node)
2088                 blk_mq_queue_reinit(q);
2089 
2090         list_for_each_entry(q, &all_q_list, all_q_node)
2091                 blk_mq_unfreeze_queue(q);
2092 
2093         mutex_unlock(&all_q_mutex);
2094         return NOTIFY_OK;
2095 }
2096 
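The two-pass loop above (start every freeze, then wait for each one) exists so that the synchronous RCU grace-period waits overlap instead of running back to back. The same shape, in a hedged user-space sketch where a one-second sleep stands in for the grace-period wait and four threads stand in for four queues:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQ 4                            /* assumed number of queues */

/* A sleep stands in for waiting out an RCU grace period. */
static void *fake_freeze_wait(void *arg)
{
        (void)arg;
        sleep(1);
        return NULL;
}

int main(void)
{
        pthread_t t[NQ];
        int i;

        /* Start all the waits first, then collect them, so the sleeps
         * overlap and the whole pass costs ~1s instead of ~NQ seconds. */
        for (i = 0; i < NQ; i++)
                pthread_create(&t[i], NULL, fake_freeze_wait, NULL);
        for (i = 0; i < NQ; i++)
                pthread_join(t[i], NULL);

        puts("all queues frozen");
        return 0;
}
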
2097 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2098 {
2099         int i;
2100 
2101         for (i = 0; i < set->nr_hw_queues; i++) {
2102                 set->tags[i] = blk_mq_init_rq_map(set, i);
2103                 if (!set->tags[i])
2104                         goto out_unwind;
2105         }
2106 
2107         return 0;
2108 
2109 out_unwind:
2110         while (--i >= 0)
2111                 blk_mq_free_rq_map(set, set->tags[i], i);
2112 
2113         return -ENOMEM;
2114 }
2115 
2116 /*
2117  * Allocate the request maps associated with this tag_set. Note that this
2118  * may reduce the depth asked for, if memory is tight. set->queue_depth
2119  * will be updated to reflect the allocated depth.
2120  */
2121 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2122 {
2123         unsigned int depth;
2124         int err;
2125 
2126         depth = set->queue_depth;
2127         do {
2128                 err = __blk_mq_alloc_rq_maps(set);
2129                 if (!err)
2130                         break;
2131 
2132                 set->queue_depth >>= 1;
2133                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2134                         err = -ENOMEM;
2135                         break;
2136                 }
2137         } while (set->queue_depth);
2138 
2139         if (!set->queue_depth || err) {
2140                 pr_err("blk-mq: failed to allocate request map\n");
2141                 return -ENOMEM;
2142         }
2143 
2144         if (depth != set->queue_depth)
2145                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2146                                                 depth, set->queue_depth);
2147 
2148         return 0;
2149 }
2150 
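The loop above halves queue_depth after each failed attempt until an allocation succeeds or the depth would fall below reserved_tags + BLK_MQ_TAG_MIN. A user-space sketch of just that back-off arithmetic; the starting depth, the reserved count, the minimum and the "allocation fails above 128 tags" predicate are all assumed values:

#include <stdbool.h>
#include <stdio.h>

#define START_DEPTH   1024              /* assumed initial queue_depth  */
#define RESERVED_TAGS 1                 /* assumed reserved_tags        */
#define TAG_MIN       1                 /* stands in for BLK_MQ_TAG_MIN */

/* Pretend that any depth above 128 is too large to allocate. */
static bool alloc_fails(unsigned int depth)
{
        return depth > 128;
}

int main(void)
{
        unsigned int depth = START_DEPTH;

        while (alloc_fails(depth)) {
                depth >>= 1;
                if (depth < RESERVED_TAGS + TAG_MIN) {
                        puts("gave up: -ENOMEM");
                        return 1;
                }
        }
        printf("settled on depth %u\n", depth);  /* 1024 -> 512 -> 256 -> 128 */
        return 0;
}
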
2151 /*
2152  * Alloc a tag set to be associated with one or more request queues.
2153  * May fail with EINVAL for various error conditions. May adjust the
2154  * requested depth down, if it is too large. In that case, the set
2155  * value will be stored in set->queue_depth.
2156  */
2157 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2158 {
2159         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2160 
2161         if (!set->nr_hw_queues)
2162                 return -EINVAL;
2163         if (!set->queue_depth)
2164                 return -EINVAL;
2165         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2166                 return -EINVAL;
2167 
2168         if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
2169                 return -EINVAL;
2170 
2171         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2172                 pr_info("blk-mq: reduced tag depth to %u\n",
2173                         BLK_MQ_MAX_DEPTH);
2174                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2175         }
2176 
2177         /*
2178          * If a crashdump is active, then we are potentially in a very
2179          * memory constrained environment. Limit us to 1 queue and
2180          * 64 tags to prevent using too much memory.
2181          */
2182         if (is_kdump_kernel()) {
2183                 set->nr_hw_queues = 1;
2184                 set->queue_depth = min(64U, set->queue_depth);
2185         }
2186 
2187         set->tags = kmalloc_node(set->nr_hw_queues *
2188                                  sizeof(struct blk_mq_tags *),
2189                                  GFP_KERNEL, set->numa_node);
2190         if (!set->tags)
2191                 return -ENOMEM;
2192 
2193         if (blk_mq_alloc_rq_maps(set))
2194                 goto enomem;
2195 
2196         mutex_init(&set->tag_list_lock);
2197         INIT_LIST_HEAD(&set->tag_list);
2198 
2199         return 0;
2200 enomem:
2201         kfree(set->tags);
2202         set->tags = NULL;
2203         return -ENOMEM;
2204 }
2205 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2206 
2207 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2208 {
2209         int i;
2210 
2211         for (i = 0; i < set->nr_hw_queues; i++) {
2212                 if (set->tags[i])
2213                         blk_mq_free_rq_map(set, set->tags[i], i);
2214         }
2215 
2216         kfree(set->tags);
2217         set->tags = NULL;
2218 }
2219 EXPORT_SYMBOL(blk_mq_free_tag_set);
2220 
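Taken together, blk_mq_alloc_tag_set(), blk_mq_init_queue(), blk_cleanup_queue() and blk_mq_free_tag_set() give the usual driver-side life cycle. The outline below is hedged: the mydrv names and numbers are invented, the blk_mq_ops table is left empty (a real driver must at least provide .queue_rq and .map_queue, or the checks above reject the set), and all the interesting callbacks are omitted.

/* Hypothetical driver glue; only the set-up/tear-down ordering matters. */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* The real callbacks (.queue_rq, .map_queue, ...) are omitted here. */
static struct blk_mq_ops mydrv_mq_ops;

static struct blk_mq_tag_set mydrv_tag_set;
static struct request_queue *mydrv_queue;

static int mydrv_setup(void)
{
        int ret;

        mydrv_tag_set.ops          = &mydrv_mq_ops;
        mydrv_tag_set.nr_hw_queues = 1;
        mydrv_tag_set.queue_depth  = 64;    /* must stay >= reserved_tags + BLK_MQ_TAG_MIN */
        mydrv_tag_set.numa_node    = NUMA_NO_NODE;
        mydrv_tag_set.cmd_size     = 0;     /* extra per-request driver payload, if any    */
        mydrv_tag_set.flags        = BLK_MQ_F_SHOULD_MERGE;

        ret = blk_mq_alloc_tag_set(&mydrv_tag_set);
        if (ret)
                return ret;

        mydrv_queue = blk_mq_init_queue(&mydrv_tag_set);
        if (IS_ERR(mydrv_queue)) {
                blk_mq_free_tag_set(&mydrv_tag_set);
                return PTR_ERR(mydrv_queue);
        }
        return 0;
}

static void mydrv_teardown(void)
{
        blk_cleanup_queue(mydrv_queue);     /* tears down the mq side as well */
        blk_mq_free_tag_set(&mydrv_tag_set);
}
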
2221 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2222 {
2223         struct blk_mq_tag_set *set = q->tag_set;
2224         struct blk_mq_hw_ctx *hctx;
2225         int i, ret;
2226 
2227         if (!set || nr > set->queue_depth)
2228                 return -EINVAL;
2229 
2230         ret = 0;
2231         queue_for_each_hw_ctx(q, hctx, i) {
2232                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2233                 if (ret)
2234                         break;
2235         }
2236 
2237         if (!ret)
2238                 q->nr_requests = nr;
2239 
2240         return ret;
2241 }
2242 
2243 void blk_mq_disable_hotplug(void)
2244 {
2245         mutex_lock(&all_q_mutex);
2246 }
2247 
2248 void blk_mq_enable_hotplug(void)
2249 {
2250         mutex_unlock(&all_q_mutex);
2251 }
2252 
2253 static int __init blk_mq_init(void)
2254 {
2255         blk_mq_cpu_init();
2256 
2257         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2258 
2259         return 0;
2260 }
2261 subsys_initcall(blk_mq_init);
2262 
