
TOMOYO Linux Cross Reference
Linux/include/linux/blkdev.h


  1 #ifndef _LINUX_BLKDEV_H
  2 #define _LINUX_BLKDEV_H
  3 
  4 #include <linux/sched.h>
  5 #include <linux/sched/clock.h>
  6 
  7 #ifdef CONFIG_BLOCK
  8 
  9 #include <linux/major.h>
 10 #include <linux/genhd.h>
 11 #include <linux/list.h>
 12 #include <linux/llist.h>
 13 #include <linux/timer.h>
 14 #include <linux/workqueue.h>
 15 #include <linux/pagemap.h>
 16 #include <linux/backing-dev-defs.h>
 17 #include <linux/wait.h>
 18 #include <linux/mempool.h>
 19 #include <linux/pfn.h>
 20 #include <linux/bio.h>
 21 #include <linux/stringify.h>
 22 #include <linux/gfp.h>
 23 #include <linux/bsg.h>
 24 #include <linux/smp.h>
 25 #include <linux/rcupdate.h>
 26 #include <linux/percpu-refcount.h>
 27 #include <linux/scatterlist.h>
 28 #include <linux/blkzoned.h>
 29 
 30 struct module;
 31 struct scsi_ioctl_command;
 32 
 33 struct request_queue;
 34 struct elevator_queue;
 35 struct blk_trace;
 36 struct request;
 37 struct sg_io_hdr;
 38 struct bsg_job;
 39 struct blkcg_gq;
 40 struct blk_flush_queue;
 41 struct pr_ops;
 42 struct rq_wb;
 43 struct blk_queue_stats;
 44 struct blk_stat_callback;
 45 
 46 #define BLKDEV_MIN_RQ   4
 47 #define BLKDEV_MAX_RQ   128     /* Default maximum */
 48 
  49 /* Must be consistent with blk_mq_poll_stats_bkt() */
 50 #define BLK_MQ_POLL_STATS_BKTS 16
 51 
 52 /*
 53  * Maximum number of blkcg policies allowed to be registered concurrently.
 54  * Defined here to simplify include dependency.
 55  */
 56 #define BLKCG_MAX_POLS          3
 57 
 58 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 59 
 60 #define BLK_RL_SYNCFULL         (1U << 0)
 61 #define BLK_RL_ASYNCFULL        (1U << 1)
 62 
 63 struct request_list {
 64         struct request_queue    *q;     /* the queue this rl belongs to */
 65 #ifdef CONFIG_BLK_CGROUP
 66         struct blkcg_gq         *blkg;  /* blkg this request pool belongs to */
 67 #endif
 68         /*
 69          * count[], starved[], and wait[] are indexed by
 70          * BLK_RW_SYNC/BLK_RW_ASYNC
 71          */
 72         int                     count[2];
 73         int                     starved[2];
 74         mempool_t               *rq_pool;
 75         wait_queue_head_t       wait[2];
 76         unsigned int            flags;
 77 };
 78 
 79 /*
 80  * request flags */
 81 typedef __u32 __bitwise req_flags_t;
 82 
 83 /* elevator knows about this request */
 84 #define RQF_SORTED              ((__force req_flags_t)(1 << 0))
 85 /* drive already may have started this one */
 86 #define RQF_STARTED             ((__force req_flags_t)(1 << 1))
 87 /* uses tagged queueing */
 88 #define RQF_QUEUED              ((__force req_flags_t)(1 << 2))
 89 /* may not be passed by ioscheduler */
 90 #define RQF_SOFTBARRIER         ((__force req_flags_t)(1 << 3))
 91 /* request for flush sequence */
 92 #define RQF_FLUSH_SEQ           ((__force req_flags_t)(1 << 4))
 93 /* merge of different types, fail separately */
 94 #define RQF_MIXED_MERGE         ((__force req_flags_t)(1 << 5))
 95 /* track inflight for MQ */
 96 #define RQF_MQ_INFLIGHT         ((__force req_flags_t)(1 << 6))
 97 /* don't call prep for this one */
 98 #define RQF_DONTPREP            ((__force req_flags_t)(1 << 7))
 99 /* set for "ide_preempt" requests and also for requests for which the SCSI
100    "quiesce" state must be ignored. */
101 #define RQF_PREEMPT             ((__force req_flags_t)(1 << 8))
102 /* contains copies of user pages */
103 #define RQF_COPY_USER           ((__force req_flags_t)(1 << 9))
104 /* vaguely specified driver internal error.  Ignored by the block layer */
105 #define RQF_FAILED              ((__force req_flags_t)(1 << 10))
106 /* don't warn about errors */
107 #define RQF_QUIET               ((__force req_flags_t)(1 << 11))
108 /* elevator private data attached */
109 #define RQF_ELVPRIV             ((__force req_flags_t)(1 << 12))
110 /* account I/O stat */
111 #define RQF_IO_STAT             ((__force req_flags_t)(1 << 13))
112 /* request came from our alloc pool */
113 #define RQF_ALLOCED             ((__force req_flags_t)(1 << 14))
114 /* runtime pm request */
115 #define RQF_PM                  ((__force req_flags_t)(1 << 15))
116 /* on IO scheduler merge hash */
117 #define RQF_HASHED              ((__force req_flags_t)(1 << 16))
118 /* IO stats tracking on */
119 #define RQF_STATS               ((__force req_flags_t)(1 << 17))
120 /* Look at ->special_vec for the actual data payload instead of the
121    bio chain. */
122 #define RQF_SPECIAL_PAYLOAD     ((__force req_flags_t)(1 << 18))
123 
124 /* flags that prevent us from merging requests: */
125 #define RQF_NOMERGE_FLAGS \
126         (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
127 
128 /*
129  * Try to put the fields that are referenced together in the same cacheline.
130  *
131  * If you modify this structure, make sure to update blk_rq_init() and
132  * especially blk_mq_rq_ctx_init() to take care of the added fields.
133  */
134 struct request {
135         struct list_head queuelist;
136         union {
137                 struct call_single_data csd;
138                 u64 fifo_time;
139         };
140 
141         struct request_queue *q;
142         struct blk_mq_ctx *mq_ctx;
143 
144         int cpu;
145         unsigned int cmd_flags;         /* op and common flags */
146         req_flags_t rq_flags;
147 
148         int internal_tag;
149 
150         unsigned long atomic_flags;
151 
152         /* the following two fields are internal, NEVER access directly */
153         unsigned int __data_len;        /* total data len */
154         int tag;
155         sector_t __sector;              /* sector cursor */
156 
157         struct bio *bio;
158         struct bio *biotail;
159 
160         /*
161          * The hash is used inside the scheduler, and killed once the
162          * request reaches the dispatch list. The ipi_list is only used
163          * to queue the request for softirq completion, which is long
164          * after the request has been unhashed (and even removed from
165          * the dispatch list).
166          */
167         union {
168                 struct hlist_node hash; /* merge hash */
169                 struct list_head ipi_list;
170         };
171 
172         /*
173          * The rb_node is only used inside the io scheduler, requests
174          * are pruned when moved to the dispatch queue. So let the
175          * completion_data share space with the rb_node.
176          */
177         union {
178                 struct rb_node rb_node; /* sort/lookup */
179                 struct bio_vec special_vec;
180                 void *completion_data;
181                 int error_count; /* for legacy drivers, don't use */
182         };
183 
184         /*
 185          * Three pointers are available for the IO schedulers; if they need
 186          * more they have to dynamically allocate them.  Flush requests are
187          * never put on the IO scheduler. So let the flush fields share
188          * space with the elevator data.
189          */
190         union {
191                 struct {
192                         struct io_cq            *icq;
193                         void                    *priv[2];
194                 } elv;
195 
196                 struct {
197                         unsigned int            seq;
198                         struct list_head        list;
199                         rq_end_io_fn            *saved_end_io;
200                 } flush;
201         };
202 
203         struct gendisk *rq_disk;
204         struct hd_struct *part;
205         unsigned long start_time;
206         struct blk_issue_stat issue_stat;
207 #ifdef CONFIG_BLK_CGROUP
208         struct request_list *rl;                /* rl this rq is alloced from */
209         unsigned long long start_time_ns;
210         unsigned long long io_start_time_ns;    /* when passed to hardware */
211 #endif
212         /* Number of scatter-gather DMA addr+len pairs after
213          * physical address coalescing is performed.
214          */
215         unsigned short nr_phys_segments;
216 #if defined(CONFIG_BLK_DEV_INTEGRITY)
217         unsigned short nr_integrity_segments;
218 #endif
219 
220         unsigned short ioprio;
221 
222         unsigned int timeout;
223 
224         void *special;          /* opaque pointer available for LLD use */
225 
226         unsigned int extra_len; /* length of alignment and padding */
227 
228         unsigned short write_hint;
229 
230         unsigned long deadline;
231         struct list_head timeout_list;
232 
233         /*
234          * completion callback.
235          */
236         rq_end_io_fn *end_io;
237         void *end_io_data;
238 
239         /* for bidi */
240         struct request *next_rq;
241 };
242 
243 static inline bool blk_rq_is_scsi(struct request *rq)
244 {
245         return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
246 }
247 
248 static inline bool blk_rq_is_private(struct request *rq)
249 {
250         return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
251 }
252 
253 static inline bool blk_rq_is_passthrough(struct request *rq)
254 {
255         return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
256 }
257 
258 static inline unsigned short req_get_ioprio(struct request *req)
259 {
260         return req->ioprio;
261 }
262 
263 #include <linux/elevator.h>
264 
265 struct blk_queue_ctx;
266 
267 typedef void (request_fn_proc) (struct request_queue *q);
268 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
269 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
270 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
271 
272 struct bio_vec;
273 typedef void (softirq_done_fn)(struct request *);
274 typedef int (dma_drain_needed_fn)(struct request *);
275 typedef int (lld_busy_fn) (struct request_queue *q);
276 typedef int (bsg_job_fn) (struct bsg_job *);
277 typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
278 typedef void (exit_rq_fn)(struct request_queue *, struct request *);
279 
280 enum blk_eh_timer_return {
281         BLK_EH_NOT_HANDLED,
282         BLK_EH_HANDLED,
283         BLK_EH_RESET_TIMER,
284 };
285 
286 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
287 
288 enum blk_queue_state {
289         Queue_down,
290         Queue_up,
291 };
292 
293 struct blk_queue_tag {
294         struct request **tag_index;     /* map of busy tags */
295         unsigned long *tag_map;         /* bit map of free/busy tags */
296         int max_depth;                  /* what we will send to device */
297         int real_max_depth;             /* what the array can hold */
298         atomic_t refcnt;                /* map can be shared */
299         int alloc_policy;               /* tag allocation policy */
300         int next_tag;                   /* next tag */
301 };
302 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
303 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
304 
305 #define BLK_SCSI_MAX_CMDS       (256)
306 #define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
307 
308 /*
309  * Zoned block device models (zoned limit).
310  */
311 enum blk_zoned_model {
312         BLK_ZONED_NONE, /* Regular block device */
313         BLK_ZONED_HA,   /* Host-aware zoned block device */
314         BLK_ZONED_HM,   /* Host-managed zoned block device */
315 };
316 
317 struct queue_limits {
318         unsigned long           bounce_pfn;
319         unsigned long           seg_boundary_mask;
320         unsigned long           virt_boundary_mask;
321 
322         unsigned int            max_hw_sectors;
323         unsigned int            max_dev_sectors;
324         unsigned int            chunk_sectors;
325         unsigned int            max_sectors;
326         unsigned int            max_segment_size;
327         unsigned int            physical_block_size;
328         unsigned int            alignment_offset;
329         unsigned int            io_min;
330         unsigned int            io_opt;
331         unsigned int            max_discard_sectors;
332         unsigned int            max_hw_discard_sectors;
333         unsigned int            max_write_same_sectors;
334         unsigned int            max_write_zeroes_sectors;
335         unsigned int            discard_granularity;
336         unsigned int            discard_alignment;
337 
338         unsigned short          logical_block_size;
339         unsigned short          max_segments;
340         unsigned short          max_integrity_segments;
341         unsigned short          max_discard_segments;
342 
343         unsigned char           misaligned;
344         unsigned char           discard_misaligned;
345         unsigned char           cluster;
346         unsigned char           raid_partial_stripes_expensive;
347         enum blk_zoned_model    zoned;
348 };
349 
350 #ifdef CONFIG_BLK_DEV_ZONED
351 
352 struct blk_zone_report_hdr {
353         unsigned int    nr_zones;
354         u8              padding[60];
355 };
356 
357 extern int blkdev_report_zones(struct block_device *bdev,
358                                sector_t sector, struct blk_zone *zones,
359                                unsigned int *nr_zones, gfp_t gfp_mask);
360 extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
361                               sector_t nr_sectors, gfp_t gfp_mask);
362 
363 extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
364                                      unsigned int cmd, unsigned long arg);
365 extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
366                                     unsigned int cmd, unsigned long arg);
367 
368 #else /* CONFIG_BLK_DEV_ZONED */
369 
370 static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
371                                             fmode_t mode, unsigned int cmd,
372                                             unsigned long arg)
373 {
374         return -ENOTTY;
375 }
376 
377 static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
378                                            fmode_t mode, unsigned int cmd,
379                                            unsigned long arg)
380 {
381         return -ENOTTY;
382 }
383 
384 #endif /* CONFIG_BLK_DEV_ZONED */
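
/*
 * Illustrative sketch: walking a zoned device with blkdev_report_zones().
 * example_count_zones() is a hypothetical helper, not part of this header;
 * it assumes CONFIG_BLK_DEV_ZONED and abbreviates error handling.
 */
#ifdef CONFIG_BLK_DEV_ZONED
static inline int example_count_zones(struct block_device *bdev)
{
        struct blk_zone zones[16];
        unsigned int nr;
        sector_t sector = 0;
        int ret, total = 0;

        do {
                nr = ARRAY_SIZE(zones);
                ret = blkdev_report_zones(bdev, sector, zones, &nr, GFP_KERNEL);
                if (ret)
                        return ret;
                total += nr;
                if (nr)         /* advance past the last zone reported */
                        sector = zones[nr - 1].start + zones[nr - 1].len;
        } while (nr);

        return total;
}
#endif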
385 
386 struct request_queue {
387         /*
388          * Together with queue_head for cacheline sharing
389          */
390         struct list_head        queue_head;
391         struct request          *last_merge;
392         struct elevator_queue   *elevator;
393         int                     nr_rqs[2];      /* # allocated [a]sync rqs */
394         int                     nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
395 
396         atomic_t                shared_hctx_restart;
397 
398         struct blk_queue_stats  *stats;
399         struct rq_wb            *rq_wb;
400 
401         /*
402          * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
403          * is used, root blkg allocates from @q->root_rl and all other
404          * blkgs from their own blkg->rl.  Which one to use should be
405          * determined using bio_request_list().
406          */
407         struct request_list     root_rl;
408 
409         request_fn_proc         *request_fn;
410         make_request_fn         *make_request_fn;
411         prep_rq_fn              *prep_rq_fn;
412         unprep_rq_fn            *unprep_rq_fn;
413         softirq_done_fn         *softirq_done_fn;
414         rq_timed_out_fn         *rq_timed_out_fn;
415         dma_drain_needed_fn     *dma_drain_needed;
416         lld_busy_fn             *lld_busy_fn;
417         /* Called just after a request is allocated */
418         init_rq_fn              *init_rq_fn;
419         /* Called just before a request is freed */
420         exit_rq_fn              *exit_rq_fn;
421         /* Called from inside blk_get_request() */
422         void (*initialize_rq_fn)(struct request *rq);
423 
424         const struct blk_mq_ops *mq_ops;
425 
426         unsigned int            *mq_map;
427 
428         /* sw queues */
429         struct blk_mq_ctx __percpu      *queue_ctx;
430         unsigned int            nr_queues;
431 
432         unsigned int            queue_depth;
433 
434         /* hw dispatch queues */
435         struct blk_mq_hw_ctx    **queue_hw_ctx;
436         unsigned int            nr_hw_queues;
437 
438         /*
439          * Dispatch queue sorting
440          */
441         sector_t                end_sector;
442         struct request          *boundary_rq;
443 
444         /*
445          * Delayed queue handling
446          */
447         struct delayed_work     delay_work;
448 
449         struct backing_dev_info *backing_dev_info;
450 
451         /*
452          * The queue owner gets to use this for whatever they like.
453          * ll_rw_blk doesn't touch it.
454          */
455         void                    *queuedata;
456 
457         /*
458          * various queue flags, see QUEUE_* below
459          */
460         unsigned long           queue_flags;
461 
462         /*
463          * ida allocated id for this queue.  Used to index queues from
464          * ioctx.
465          */
466         int                     id;
467 
468         /*
469          * queue needs bounce pages for pages above this limit
470          */
471         gfp_t                   bounce_gfp;
472 
473         /*
474          * protects queue structures from reentrancy. ->__queue_lock should
475          * _never_ be used directly, it is queue private. always use
476          * ->queue_lock.
477          */
478         spinlock_t              __queue_lock;
479         spinlock_t              *queue_lock;
480 
481         /*
482          * queue kobject
483          */
484         struct kobject kobj;
485 
486         /*
487          * mq queue kobject
488          */
489         struct kobject mq_kobj;
490 
491 #ifdef  CONFIG_BLK_DEV_INTEGRITY
492         struct blk_integrity integrity;
493 #endif  /* CONFIG_BLK_DEV_INTEGRITY */
494 
495 #ifdef CONFIG_PM
496         struct device           *dev;
497         int                     rpm_status;
498         unsigned int            nr_pending;
499 #endif
500 
501         /*
502          * queue settings
503          */
504         unsigned long           nr_requests;    /* Max # of requests */
505         unsigned int            nr_congestion_on;
506         unsigned int            nr_congestion_off;
507         unsigned int            nr_batching;
508 
509         unsigned int            dma_drain_size;
510         void                    *dma_drain_buffer;
511         unsigned int            dma_pad_mask;
512         unsigned int            dma_alignment;
513 
514         struct blk_queue_tag    *queue_tags;
515         struct list_head        tag_busy_list;
516 
517         unsigned int            nr_sorted;
518         unsigned int            in_flight[2];
519 
520         /*
521          * Number of active block driver functions for which blk_drain_queue()
522          * must wait. Must be incremented around functions that unlock the
523          * queue_lock internally, e.g. scsi_request_fn().
524          */
525         unsigned int            request_fn_active;
526 
527         unsigned int            rq_timeout;
528         int                     poll_nsec;
529 
530         struct blk_stat_callback        *poll_cb;
531         struct blk_rq_stat      poll_stat[BLK_MQ_POLL_STATS_BKTS];
532 
533         struct timer_list       timeout;
534         struct work_struct      timeout_work;
535         struct list_head        timeout_list;
536 
537         struct list_head        icq_list;
538 #ifdef CONFIG_BLK_CGROUP
539         DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
540         struct blkcg_gq         *root_blkg;
541         struct list_head        blkg_list;
542 #endif
543 
544         struct queue_limits     limits;
545 
546         /*
547          * sg stuff
548          */
549         unsigned int            sg_timeout;
550         unsigned int            sg_reserved_size;
551         int                     node;
552 #ifdef CONFIG_BLK_DEV_IO_TRACE
553         struct blk_trace        *blk_trace;
554 #endif
555         /*
556          * for flush operations
557          */
558         struct blk_flush_queue  *fq;
559 
560         struct list_head        requeue_list;
561         spinlock_t              requeue_lock;
562         struct delayed_work     requeue_work;
563 
564         struct mutex            sysfs_lock;
565 
566         int                     bypass_depth;
567         atomic_t                mq_freeze_depth;
568 
569 #if defined(CONFIG_BLK_DEV_BSG)
570         bsg_job_fn              *bsg_job_fn;
571         struct bsg_class_device bsg_dev;
572 #endif
573 
574 #ifdef CONFIG_BLK_DEV_THROTTLING
575         /* Throttle data */
576         struct throtl_data *td;
577 #endif
578         struct rcu_head         rcu_head;
579         wait_queue_head_t       mq_freeze_wq;
580         struct percpu_ref       q_usage_counter;
581         struct list_head        all_q_node;
582 
583         struct blk_mq_tag_set   *tag_set;
584         struct list_head        tag_set_list;
585         struct bio_set          *bio_split;
586 
587 #ifdef CONFIG_BLK_DEBUG_FS
588         struct dentry           *debugfs_dir;
589         struct dentry           *sched_debugfs_dir;
590 #endif
591 
592         bool                    mq_sysfs_init_done;
593 
594         size_t                  cmd_size;
595         void                    *rq_alloc_data;
596 
597         struct work_struct      release_work;
598 
599 #define BLK_MAX_WRITE_HINTS     5
600         u64                     write_hints[BLK_MAX_WRITE_HINTS];
601 };
602 
603 #define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
604 #define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
605 #define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
606 #define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
607 #define QUEUE_FLAG_DYING        5       /* queue being torn down */
608 #define QUEUE_FLAG_BYPASS       6       /* act as dumb FIFO queue */
609 #define QUEUE_FLAG_BIDI         7       /* queue supports bidi requests */
610 #define QUEUE_FLAG_NOMERGES     8       /* disable merge attempts */
611 #define QUEUE_FLAG_SAME_COMP    9       /* complete on same CPU-group */
612 #define QUEUE_FLAG_FAIL_IO     10       /* fake timeout */
613 #define QUEUE_FLAG_STACKABLE   11       /* supports request stacking */
614 #define QUEUE_FLAG_NONROT      12       /* non-rotational device (SSD) */
615 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
616 #define QUEUE_FLAG_IO_STAT     13       /* do IO stats */
617 #define QUEUE_FLAG_DISCARD     14       /* supports DISCARD */
618 #define QUEUE_FLAG_NOXMERGES   15       /* No extended merges */
619 #define QUEUE_FLAG_ADD_RANDOM  16       /* Contributes to random pool */
620 #define QUEUE_FLAG_SECERASE    17       /* supports secure erase */
621 #define QUEUE_FLAG_SAME_FORCE  18       /* force complete on same CPU */
622 #define QUEUE_FLAG_DEAD        19       /* queue tear-down finished */
623 #define QUEUE_FLAG_INIT_DONE   20       /* queue is initialized */
624 #define QUEUE_FLAG_NO_SG_MERGE 21       /* don't attempt to merge SG segments*/
625 #define QUEUE_FLAG_POLL        22       /* IO polling enabled if set */
626 #define QUEUE_FLAG_WC          23       /* Write back caching */
627 #define QUEUE_FLAG_FUA         24       /* device supports FUA writes */
 628 #define QUEUE_FLAG_FLUSH_NQ    25       /* flush not queueable */
629 #define QUEUE_FLAG_DAX         26       /* device supports DAX */
630 #define QUEUE_FLAG_STATS       27       /* track rq completion times */
631 #define QUEUE_FLAG_POLL_STATS  28       /* collecting stats for hybrid polling */
632 #define QUEUE_FLAG_REGISTERED  29       /* queue has been registered to a disk */
633 #define QUEUE_FLAG_SCSI_PASSTHROUGH 30  /* queue supports SCSI commands */
634 #define QUEUE_FLAG_QUIESCED    31       /* queue has been quiesced */
635 
636 #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
637                                  (1 << QUEUE_FLAG_STACKABLE)    |       \
638                                  (1 << QUEUE_FLAG_SAME_COMP)    |       \
639                                  (1 << QUEUE_FLAG_ADD_RANDOM))
640 
641 #define QUEUE_FLAG_MQ_DEFAULT   ((1 << QUEUE_FLAG_IO_STAT) |            \
642                                  (1 << QUEUE_FLAG_STACKABLE)    |       \
643                                  (1 << QUEUE_FLAG_SAME_COMP)    |       \
644                                  (1 << QUEUE_FLAG_POLL))
645 
646 /*
647  * @q->queue_lock is set while a queue is being initialized. Since we know
648  * that no other threads access the queue object before @q->queue_lock has
649  * been set, it is safe to manipulate queue flags without holding the
650  * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
651  * blk_init_allocated_queue().
652  */
653 static inline void queue_lockdep_assert_held(struct request_queue *q)
654 {
655         if (q->queue_lock)
656                 lockdep_assert_held(q->queue_lock);
657 }
658 
659 static inline void queue_flag_set_unlocked(unsigned int flag,
660                                            struct request_queue *q)
661 {
662         __set_bit(flag, &q->queue_flags);
663 }
664 
665 static inline int queue_flag_test_and_clear(unsigned int flag,
666                                             struct request_queue *q)
667 {
668         queue_lockdep_assert_held(q);
669 
670         if (test_bit(flag, &q->queue_flags)) {
671                 __clear_bit(flag, &q->queue_flags);
672                 return 1;
673         }
674 
675         return 0;
676 }
677 
678 static inline int queue_flag_test_and_set(unsigned int flag,
679                                           struct request_queue *q)
680 {
681         queue_lockdep_assert_held(q);
682 
683         if (!test_bit(flag, &q->queue_flags)) {
684                 __set_bit(flag, &q->queue_flags);
685                 return 0;
686         }
687 
688         return 1;
689 }
690 
691 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
692 {
693         queue_lockdep_assert_held(q);
694         __set_bit(flag, &q->queue_flags);
695 }
696 
697 static inline void queue_flag_clear_unlocked(unsigned int flag,
698                                              struct request_queue *q)
699 {
700         __clear_bit(flag, &q->queue_flags);
701 }
702 
703 static inline int queue_in_flight(struct request_queue *q)
704 {
705         return q->in_flight[0] + q->in_flight[1];
706 }
707 
708 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
709 {
710         queue_lockdep_assert_held(q);
711         __clear_bit(flag, &q->queue_flags);
712 }
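
/*
 * Illustrative sketch: a driver marking its device non-rotational during
 * setup, before the queue is visible to other contexts, so the unlocked
 * helpers are safe (see the comment above queue_lockdep_assert_held()).
 * example_mark_ssd_queue() is a hypothetical helper, not part of this header.
 */
static inline void example_mark_ssd_queue(struct request_queue *q)
{
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}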
713 
714 #define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
715 #define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
716 #define blk_queue_dying(q)      test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
717 #define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
718 #define blk_queue_bypass(q)     test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
719 #define blk_queue_init_done(q)  test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
720 #define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
721 #define blk_queue_noxmerges(q)  \
722         test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
723 #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
724 #define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
725 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
726 #define blk_queue_stackable(q)  \
727         test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
728 #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
729 #define blk_queue_secure_erase(q) \
730         (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
731 #define blk_queue_dax(q)        test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
732 #define blk_queue_scsi_passthrough(q)   \
733         test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
734 
735 #define blk_noretry_request(rq) \
736         ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
737                              REQ_FAILFAST_DRIVER))
738 #define blk_queue_quiesced(q)   test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
739 
740 static inline bool blk_account_rq(struct request *rq)
741 {
742         return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
743 }
744 
745 #define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
746 #define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
747 /* rq->queuelist of dequeued request must be list_empty() */
748 #define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))
749 
750 #define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)
751 
752 #define rq_data_dir(rq)         (op_is_write(req_op(rq)) ? WRITE : READ)
753 
754 /*
755  * Driver can handle struct request, if it either has an old style
756  * request_fn defined, or is blk-mq based.
757  */
758 static inline bool queue_is_rq_based(struct request_queue *q)
759 {
760         return q->request_fn || q->mq_ops;
761 }
762 
763 static inline unsigned int blk_queue_cluster(struct request_queue *q)
764 {
765         return q->limits.cluster;
766 }
767 
768 static inline enum blk_zoned_model
769 blk_queue_zoned_model(struct request_queue *q)
770 {
771         return q->limits.zoned;
772 }
773 
774 static inline bool blk_queue_is_zoned(struct request_queue *q)
775 {
776         switch (blk_queue_zoned_model(q)) {
777         case BLK_ZONED_HA:
778         case BLK_ZONED_HM:
779                 return true;
780         default:
781                 return false;
782         }
783 }
784 
785 static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
786 {
787         return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
788 }
789 
790 static inline bool rq_is_sync(struct request *rq)
791 {
792         return op_is_sync(rq->cmd_flags);
793 }
794 
795 static inline bool blk_rl_full(struct request_list *rl, bool sync)
796 {
797         unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
798 
799         return rl->flags & flag;
800 }
801 
802 static inline void blk_set_rl_full(struct request_list *rl, bool sync)
803 {
804         unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
805 
806         rl->flags |= flag;
807 }
808 
809 static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
810 {
811         unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
812 
813         rl->flags &= ~flag;
814 }
815 
816 static inline bool rq_mergeable(struct request *rq)
817 {
818         if (blk_rq_is_passthrough(rq))
819                 return false;
820 
821         if (req_op(rq) == REQ_OP_FLUSH)
822                 return false;
823 
824         if (req_op(rq) == REQ_OP_WRITE_ZEROES)
825                 return false;
826 
827         if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
828                 return false;
829         if (rq->rq_flags & RQF_NOMERGE_FLAGS)
830                 return false;
831 
832         return true;
833 }
834 
835 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
836 {
837         if (bio_page(a) == bio_page(b) &&
838             bio_offset(a) == bio_offset(b))
839                 return true;
840 
841         return false;
842 }
843 
844 static inline unsigned int blk_queue_depth(struct request_queue *q)
845 {
846         if (q->queue_depth)
847                 return q->queue_depth;
848 
849         return q->nr_requests;
850 }
851 
852 /*
853  * q->prep_rq_fn return values
854  */
855 enum {
856         BLKPREP_OK,             /* serve it */
857         BLKPREP_KILL,           /* fatal error, kill, return -EIO */
858         BLKPREP_DEFER,          /* leave on queue */
859         BLKPREP_INVALID,        /* invalid command, kill, return -EREMOTEIO */
860 };
861 
862 extern unsigned long blk_max_low_pfn, blk_max_pfn;
863 
864 /*
865  * standard bounce addresses:
866  *
867  * BLK_BOUNCE_HIGH      : bounce all highmem pages
868  * BLK_BOUNCE_ANY       : don't bounce anything
869  * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
870  */
871 
872 #if BITS_PER_LONG == 32
873 #define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
874 #else
875 #define BLK_BOUNCE_HIGH         -1ULL
876 #endif
877 #define BLK_BOUNCE_ANY          (-1ULL)
878 #define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))
879 
880 /*
881  * default timeout for SG_IO if none specified
882  */
883 #define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
884 #define BLK_MIN_SG_TIMEOUT      (7 * HZ)
885 
886 struct rq_map_data {
887         struct page **pages;
888         int page_order;
889         int nr_entries;
890         unsigned long offset;
891         int null_mapped;
892         int from_user;
893 };
894 
895 struct req_iterator {
896         struct bvec_iter iter;
897         struct bio *bio;
898 };
899 
900 /* This should not be used directly - use rq_for_each_segment */
901 #define for_each_bio(_bio)              \
902         for (; _bio; _bio = _bio->bi_next)
903 #define __rq_for_each_bio(_bio, rq)     \
904         if ((rq->bio))                  \
905                 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
906 
907 #define rq_for_each_segment(bvl, _rq, _iter)                    \
908         __rq_for_each_bio(_iter.bio, _rq)                       \
909                 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
910 
911 #define rq_iter_last(bvec, _iter)                               \
912                 (_iter.bio->bi_next == NULL &&                  \
913                  bio_iter_last(bvec, _iter.iter))
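
/*
 * Illustrative sketch: copying the data of a request into a flat buffer by
 * walking every bio_vec with rq_for_each_segment(). example_copy_request()
 * and dev_buf are hypothetical; kmap_atomic() comes from <linux/highmem.h>,
 * which a real driver would include itself.
 */
static inline void example_copy_request(struct request *rq, void *dev_buf)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        size_t off = 0;

        rq_for_each_segment(bvec, rq, iter) {
                void *src = kmap_atomic(bvec.bv_page);

                memcpy(dev_buf + off, src + bvec.bv_offset, bvec.bv_len);
                kunmap_atomic(src);
                off += bvec.bv_len;
        }
}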
914 
915 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
916 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
917 #endif
918 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
919 extern void rq_flush_dcache_pages(struct request *rq);
920 #else
921 static inline void rq_flush_dcache_pages(struct request *rq)
922 {
923 }
924 #endif
925 
926 #ifdef CONFIG_PRINTK
927 #define vfs_msg(sb, level, fmt, ...)                            \
928         __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
929 #else
930 #define vfs_msg(sb, level, fmt, ...)                            \
931 do {                                                            \
932         no_printk(fmt, ##__VA_ARGS__);                          \
933         __vfs_msg(sb, "", " ");                                 \
934 } while (0)
935 #endif
936 
937 extern int blk_register_queue(struct gendisk *disk);
938 extern void blk_unregister_queue(struct gendisk *disk);
939 extern blk_qc_t generic_make_request(struct bio *bio);
940 extern void blk_rq_init(struct request_queue *q, struct request *rq);
941 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
942 extern void blk_put_request(struct request *);
943 extern void __blk_put_request(struct request_queue *, struct request *);
944 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
945                                        gfp_t gfp_mask);
946 extern void blk_requeue_request(struct request_queue *, struct request *);
947 extern int blk_lld_busy(struct request_queue *q);
948 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
949                              struct bio_set *bs, gfp_t gfp_mask,
950                              int (*bio_ctr)(struct bio *, struct bio *, void *),
951                              void *data);
952 extern void blk_rq_unprep_clone(struct request *rq);
953 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
954                                      struct request *rq);
955 extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
956 extern void blk_delay_queue(struct request_queue *, unsigned long);
957 extern void blk_queue_split(struct request_queue *, struct bio **);
958 extern void blk_recount_segments(struct request_queue *, struct bio *);
959 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
960 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
961                               unsigned int, void __user *);
962 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
963                           unsigned int, void __user *);
964 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
965                          struct scsi_ioctl_command __user *);
966 
967 extern int blk_queue_enter(struct request_queue *q, bool nowait);
968 extern void blk_queue_exit(struct request_queue *q);
969 extern void blk_start_queue(struct request_queue *q);
970 extern void blk_start_queue_async(struct request_queue *q);
971 extern void blk_stop_queue(struct request_queue *q);
972 extern void blk_sync_queue(struct request_queue *q);
973 extern void __blk_stop_queue(struct request_queue *q);
974 extern void __blk_run_queue(struct request_queue *q);
975 extern void __blk_run_queue_uncond(struct request_queue *q);
976 extern void blk_run_queue(struct request_queue *);
977 extern void blk_run_queue_async(struct request_queue *q);
978 extern int blk_rq_map_user(struct request_queue *, struct request *,
979                            struct rq_map_data *, void __user *, unsigned long,
980                            gfp_t);
981 extern int blk_rq_unmap_user(struct bio *);
982 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
983 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
984                                struct rq_map_data *, const struct iov_iter *,
985                                gfp_t);
986 extern void blk_execute_rq(struct request_queue *, struct gendisk *,
987                           struct request *, int);
988 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
989                                   struct request *, int, rq_end_io_fn *);
990 
991 int blk_status_to_errno(blk_status_t status);
992 blk_status_t errno_to_blk_status(int errno);
993 
994 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
995 
996 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
997 {
998         return bdev->bd_disk->queue;    /* this is never NULL */
999 }
1000 
1001 /*
1002  * blk_rq_pos()                 : the current sector
1003  * blk_rq_bytes()               : bytes left in the entire request
1004  * blk_rq_cur_bytes()           : bytes left in the current segment
1005  * blk_rq_err_bytes()           : bytes left till the next error boundary
1006  * blk_rq_sectors()             : sectors left in the entire request
1007  * blk_rq_cur_sectors()         : sectors left in the current segment
1008  */
1009 static inline sector_t blk_rq_pos(const struct request *rq)
1010 {
1011         return rq->__sector;
1012 }
1013 
1014 static inline unsigned int blk_rq_bytes(const struct request *rq)
1015 {
1016         return rq->__data_len;
1017 }
1018 
1019 static inline int blk_rq_cur_bytes(const struct request *rq)
1020 {
1021         return rq->bio ? bio_cur_bytes(rq->bio) : 0;
1022 }
1023 
1024 extern unsigned int blk_rq_err_bytes(const struct request *rq);
1025 
1026 static inline unsigned int blk_rq_sectors(const struct request *rq)
1027 {
1028         return blk_rq_bytes(rq) >> 9;
1029 }
1030 
1031 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1032 {
1033         return blk_rq_cur_bytes(rq) >> 9;
1034 }
1035 
1036 /*
1037  * Some commands like WRITE SAME have a payload or data transfer size which
1038  * is different from the size of the request.  Any driver that supports such
1039  * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1040  * calculate the data transfer size.
1041  */
1042 static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1043 {
1044         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1045                 return rq->special_vec.bv_len;
1046         return blk_rq_bytes(rq);
1047 }
1048 
1049 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1050                                                      int op)
1051 {
1052         if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
1053                 return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
1054 
1055         if (unlikely(op == REQ_OP_WRITE_SAME))
1056                 return q->limits.max_write_same_sectors;
1057 
1058         if (unlikely(op == REQ_OP_WRITE_ZEROES))
1059                 return q->limits.max_write_zeroes_sectors;
1060 
1061         return q->limits.max_sectors;
1062 }
1063 
1064 /*
1065  * Return maximum size of a request at given offset. Only valid for
1066  * file system requests.
1067  */
1068 static inline unsigned int blk_max_size_offset(struct request_queue *q,
1069                                                sector_t offset)
1070 {
1071         if (!q->limits.chunk_sectors)
1072                 return q->limits.max_sectors;
1073 
1074         return q->limits.chunk_sectors -
1075                         (offset & (q->limits.chunk_sectors - 1));
1076 }
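
/*
 * Worked example for the helper above: with chunk_sectors == 256 and
 * offset == 300, the next chunk boundary is sector 512, so the largest
 * allowed size is 256 - (300 & 255) = 212 sectors. chunk_sectors is
 * expected to be a power of two for this mask arithmetic to hold.
 */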
1077 
1078 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
1079                                                   sector_t offset)
1080 {
1081         struct request_queue *q = rq->q;
1082 
1083         if (blk_rq_is_passthrough(rq))
1084                 return q->limits.max_hw_sectors;
1085 
1086         if (!q->limits.chunk_sectors ||
1087             req_op(rq) == REQ_OP_DISCARD ||
1088             req_op(rq) == REQ_OP_SECURE_ERASE)
1089                 return blk_queue_get_max_sectors(q, req_op(rq));
1090 
1091         return min(blk_max_size_offset(q, offset),
1092                         blk_queue_get_max_sectors(q, req_op(rq)));
1093 }
1094 
1095 static inline unsigned int blk_rq_count_bios(struct request *rq)
1096 {
1097         unsigned int nr_bios = 0;
1098         struct bio *bio;
1099 
1100         __rq_for_each_bio(bio, rq)
1101                 nr_bios++;
1102 
1103         return nr_bios;
1104 }
1105 
1106 /*
1107  * Request issue related functions.
1108  */
1109 extern struct request *blk_peek_request(struct request_queue *q);
1110 extern void blk_start_request(struct request *rq);
1111 extern struct request *blk_fetch_request(struct request_queue *q);
1112 
1113 /*
1114  * Request completion related functions.
1115  *
1116  * blk_update_request() completes given number of bytes and updates
1117  * the request without completing it.
1118  *
1119  * blk_end_request() and friends.  __blk_end_request() must be called
1120  * with the request queue spinlock acquired.
1121  *
1122  * Several drivers define their own end_request and call
1123  * blk_end_request() for parts of the original function.
1124  * This prevents code duplication in drivers.
1125  */
1126 extern bool blk_update_request(struct request *rq, blk_status_t error,
1127                                unsigned int nr_bytes);
1128 extern void blk_finish_request(struct request *rq, blk_status_t error);
1129 extern bool blk_end_request(struct request *rq, blk_status_t error,
1130                             unsigned int nr_bytes);
1131 extern void blk_end_request_all(struct request *rq, blk_status_t error);
1132 extern bool __blk_end_request(struct request *rq, blk_status_t error,
1133                               unsigned int nr_bytes);
1134 extern void __blk_end_request_all(struct request *rq, blk_status_t error);
1135 extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
1136 
1137 extern void blk_complete_request(struct request *);
1138 extern void __blk_complete_request(struct request *);
1139 extern void blk_abort_request(struct request *);
1140 extern void blk_unprep_request(struct request *);
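
/*
 * Illustrative sketch: partial completion from a legacy (non-mq) driver's
 * completion path using blk_end_request(), which returns true while bytes
 * remain. example_complete() and bytes_done are hypothetical; a caller that
 * already holds the queue lock would use __blk_end_request() instead.
 */
static inline void example_complete(struct request *rq, unsigned int bytes_done)
{
        if (blk_end_request(rq, BLK_STS_OK, bytes_done))
                return;         /* bytes still pending; request stays around */
        /* request fully completed and released by the block layer */
}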
1141 
1142 /*
1143  * Access functions for manipulating queue properties
1144  */
1145 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
1146                                         spinlock_t *lock, int node_id);
1147 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
1148 extern int blk_init_allocated_queue(struct request_queue *);
1149 extern void blk_cleanup_queue(struct request_queue *);
1150 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1151 extern void blk_queue_bounce_limit(struct request_queue *, u64);
1152 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
1153 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
1154 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
1155 extern void blk_queue_max_discard_segments(struct request_queue *,
1156                 unsigned short);
1157 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
1158 extern void blk_queue_max_discard_sectors(struct request_queue *q,
1159                 unsigned int max_discard_sectors);
1160 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1161                 unsigned int max_write_same_sectors);
1162 extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
1163                 unsigned int max_write_same_sectors);
1164 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
1165 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
1166 extern void blk_queue_alignment_offset(struct request_queue *q,
1167                                        unsigned int alignment);
1168 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1169 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1170 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1171 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1172 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
1173 extern void blk_set_default_limits(struct queue_limits *lim);
1174 extern void blk_set_stacking_limits(struct queue_limits *lim);
1175 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1176                             sector_t offset);
1177 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1178                             sector_t offset);
1179 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1180                               sector_t offset);
1181 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1182 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1183 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1184 extern int blk_queue_dma_drain(struct request_queue *q,
1185                                dma_drain_needed_fn *dma_drain_needed,
1186                                void *buf, unsigned int size);
1187 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1188 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1189 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1190 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1191 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1192 extern void blk_queue_dma_alignment(struct request_queue *, int);
1193 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1194 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1195 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1196 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1197 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1198 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
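
/*
 * Illustrative sketch: typical limit setup in a driver probe path using the
 * accessors above. example_set_limits() and the numbers are hypothetical,
 * for a device with 4KiB blocks that accepts up to 1MiB per command.
 */
static inline void example_set_limits(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 4096);
        blk_queue_physical_block_size(q, 4096);
        blk_queue_max_hw_sectors(q, 2048);      /* 1MiB in 512-byte sectors */
        blk_queue_max_segments(q, 128);
        blk_queue_write_cache(q, true, true);   /* volatile cache + FUA */
}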
1199 
1200 /*
1201  * Number of physical segments as sent to the device.
1202  *
1203  * Normally this is the number of discontiguous data segments sent by the
1204  * submitter.  But for data-less commands like discard we might have no
1205  * actual data segments submitted, but the driver might have to add its
1206  * own special payload.  In that case we still return 1 here so that this
1207  * special payload will be mapped.
1208  */
1209 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1210 {
1211         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1212                 return 1;
1213         return rq->nr_phys_segments;
1214 }
1215 
1216 /*
1217  * Number of discard segments (or ranges) the driver needs to fill in.
1218  * Each discard bio merged into a request is counted as one segment.
1219  */
1220 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1221 {
1222         return max_t(unsigned short, rq->nr_phys_segments, 1);
1223 }
1224 
1225 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1226 extern void blk_dump_rq_flags(struct request *, char *);
1227 extern long nr_blockdev_pages(void);
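
/*
 * Illustrative sketch: building a scatterlist for DMA from a request.
 * example_map_request() and sgl are hypothetical; sgl must provide at least
 * blk_rq_nr_phys_segments(rq) entries, and the result would normally be
 * handed to dma_map_sg() afterwards.
 */
static inline int example_map_request(struct request_queue *q,
                                      struct request *rq,
                                      struct scatterlist *sgl)
{
        sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
        return blk_rq_map_sg(q, rq, sgl);       /* number of mapped segments */
}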
1228 
1229 bool __must_check blk_get_queue(struct request_queue *);
1230 struct request_queue *blk_alloc_queue(gfp_t);
1231 struct request_queue *blk_alloc_queue_node(gfp_t, int);
1232 extern void blk_put_queue(struct request_queue *);
1233 extern void blk_set_queue_dying(struct request_queue *);
1234 
1235 /*
1236  * block layer runtime pm functions
1237  */
1238 #ifdef CONFIG_PM
1239 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1240 extern int blk_pre_runtime_suspend(struct request_queue *q);
1241 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1242 extern void blk_pre_runtime_resume(struct request_queue *q);
1243 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1244 extern void blk_set_runtime_active(struct request_queue *q);
1245 #else
1246 static inline void blk_pm_runtime_init(struct request_queue *q,
1247         struct device *dev) {}
1248 static inline int blk_pre_runtime_suspend(struct request_queue *q)
1249 {
1250         return -ENOSYS;
1251 }
1252 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1253 static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1254 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1255 static inline void blk_set_runtime_active(struct request_queue *q) {}
1256 #endif
1257 
1258 /*
1259  * blk_plug permits building a queue of related requests by holding the I/O
1260  * fragments for a short period. This allows merging of sequential requests
1261  * into a single larger request. As the requests are moved from a per-task list to
1262  * the device's request_queue in a batch, this results in improved scalability
1263  * as contention on the request_queue lock is reduced.
1264  *
1265  * It is ok not to disable preemption when adding the request to the plug list
1266  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1267  * the plug list when the task sleeps by itself. For details, please see
1268  * schedule() where blk_schedule_flush_plug() is called.
1269  */
1270 struct blk_plug {
1271         struct list_head list; /* requests */
1272         struct list_head mq_list; /* blk-mq requests */
1273         struct list_head cb_list; /* md requires an unplug callback */
1274 };
1275 #define BLK_MAX_REQUEST_COUNT 16
1276 #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
1277 
1278 struct blk_plug_cb;
1279 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1280 struct blk_plug_cb {
1281         struct list_head list;
1282         blk_plug_cb_fn callback;
1283         void *data;
1284 };
1285 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1286                                              void *data, int size);
1287 extern void blk_start_plug(struct blk_plug *);
1288 extern void blk_finish_plug(struct blk_plug *);
1289 extern void blk_flush_plug_list(struct blk_plug *, bool);
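
/*
 * Illustrative sketch: batching submissions under one plug so the block
 * layer can merge them before they reach the device. example_submit_batch()
 * is hypothetical; submit_bio() is declared in <linux/bio.h>, included above.
 */
static inline void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* collect on the per-task plug list */
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]);
        blk_finish_plug(&plug);         /* flush the whole batch to the queue(s) */
}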
1290 
1291 static inline void blk_flush_plug(struct task_struct *tsk)
1292 {
1293         struct blk_plug *plug = tsk->plug;
1294 
1295         if (plug)
1296                 blk_flush_plug_list(plug, false);
1297 }
1298 
1299 static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1300 {
1301         struct blk_plug *plug = tsk->plug;
1302 
1303         if (plug)
1304                 blk_flush_plug_list(plug, true);
1305 }
1306 
1307 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1308 {
1309         struct blk_plug *plug = tsk->plug;
1310 
1311         return plug &&
1312                 (!list_empty(&plug->list) ||
1313                  !list_empty(&plug->mq_list) ||
1314                  !list_empty(&plug->cb_list));
1315 }
1316 
1317 /*
1318  * tag stuff
1319  */
1320 extern int blk_queue_start_tag(struct request_queue *, struct request *);
1321 extern struct request *blk_queue_find_tag(struct request_queue *, int);
1322 extern void blk_queue_end_tag(struct request_queue *, struct request *);
1323 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1324 extern void blk_queue_free_tags(struct request_queue *);
1325 extern int blk_queue_resize_tags(struct request_queue *, int);
1326 extern void blk_queue_invalidate_tags(struct request_queue *);
1327 extern struct blk_queue_tag *blk_init_tags(int, int);
1328 extern void blk_free_tags(struct blk_queue_tag *);
1329 
1330 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1331                                                 int tag)
1332 {
1333         if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1334                 return NULL;
1335         return bqt->tag_index[tag];
1336 }
1337 
1338 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1339 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1340                 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1341 
1342 #define BLKDEV_DISCARD_SECURE   (1 << 0)        /* issue a secure erase */
1343 
1344 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1345                 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1346 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1347                 sector_t nr_sects, gfp_t gfp_mask, int flags,
1348                 struct bio **biop);
1349 
1350 #define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
1351 #define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */
1352 
1353 extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1354                 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1355                 unsigned flags);
1356 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1357                 sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1358 
1359 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1360                 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1361 {
1362         return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1363                                     nr_blocks << (sb->s_blocksize_bits - 9),
1364                                     gfp_mask, flags);
1365 }
1366 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1367                 sector_t nr_blocks, gfp_t gfp_mask)
1368 {
1369         return blkdev_issue_zeroout(sb->s_bdev,
1370                                     block << (sb->s_blocksize_bits - 9),
1371                                     nr_blocks << (sb->s_blocksize_bits - 9),
1372                                     gfp_mask, 0);
1373 }
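/*
 * The shifts above convert filesystem blocks to 512-byte sectors:
 * s_blocksize_bits is log2 of the block size, so for 4096-byte blocks
 * (s_blocksize_bits == 12) the shift is 3 and block N starts at sector
 * N << 3, spanning 8 sectors.
 */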
1374 
1375 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1376 
1377 enum blk_default_limits {
1378         BLK_MAX_SEGMENTS        = 128,
1379         BLK_SAFE_MAX_SECTORS    = 255,
1380         BLK_DEF_MAX_SECTORS     = 2560,
1381         BLK_MAX_SEGMENT_SIZE    = 65536,
1382         BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
1383 };
1384 
1385 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1386 
1387 static inline unsigned long queue_segment_boundary(struct request_queue *q)
1388 {
1389         return q->limits.seg_boundary_mask;
1390 }
1391 
1392 static inline unsigned long queue_virt_boundary(struct request_queue *q)
1393 {
1394         return q->limits.virt_boundary_mask;
1395 }
1396 
1397 static inline unsigned int queue_max_sectors(struct request_queue *q)
1398 {
1399         return q->limits.max_sectors;
1400 }
1401 
1402 static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1403 {
1404         return q->limits.max_hw_sectors;
1405 }
1406 
1407 static inline unsigned short queue_max_segments(struct request_queue *q)
1408 {
1409         return q->limits.max_segments;
1410 }
1411 
1412 static inline unsigned short queue_max_discard_segments(struct request_queue *q)
1413 {
1414         return q->limits.max_discard_segments;
1415 }
1416 
1417 static inline unsigned int queue_max_segment_size(struct request_queue *q)
1418 {
1419         return q->limits.max_segment_size;
1420 }
1421 
1422 static inline unsigned short queue_logical_block_size(struct request_queue *q)
1423 {
1424         int retval = 512;
1425 
1426         if (q && q->limits.logical_block_size)
1427                 retval = q->limits.logical_block_size;
1428 
1429         return retval;
1430 }
1431 
1432 static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1433 {
1434         return queue_logical_block_size(bdev_get_queue(bdev));
1435 }
1436 
1437 static inline unsigned int queue_physical_block_size(struct request_queue *q)
1438 {
1439         return q->limits.physical_block_size;
1440 }
1441 
1442 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1443 {
1444         return queue_physical_block_size(bdev_get_queue(bdev));
1445 }
1446 
1447 static inline unsigned int queue_io_min(struct request_queue *q)
1448 {
1449         return q->limits.io_min;
1450 }
1451 
1452 static inline int bdev_io_min(struct block_device *bdev)
1453 {
1454         return queue_io_min(bdev_get_queue(bdev));
1455 }
1456 
1457 static inline unsigned int queue_io_opt(struct request_queue *q)
1458 {
1459         return q->limits.io_opt;
1460 }
1461 
1462 static inline int bdev_io_opt(struct block_device *bdev)
1463 {
1464         return queue_io_opt(bdev_get_queue(bdev));
1465 }
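/*
 * io_min is the smallest I/O size the device prefers (for a RAID set,
 * typically the chunk size) and io_opt is the optimal I/O size
 * (typically a full stripe).  Example of shaping a transfer from these
 * hints (a sketch; 'len' is a hypothetical byte count):
 *
 *	unsigned int opt = bdev_io_opt(bdev);
 *
 *	if (opt && len > opt)
 *		len -= len % opt;
 */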
1466 
1467 static inline int queue_alignment_offset(struct request_queue *q)
1468 {
1469         if (q->limits.misaligned)
1470                 return -1;
1471 
1472         return q->limits.alignment_offset;
1473 }
1474 
1475 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1476 {
1477         unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1478         unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
1479 
1480         return (granularity + lim->alignment_offset - alignment) % granularity;
1481 }
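/*
 * Worked example: on a device with 4096-byte physical blocks, io_min no
 * larger, and alignment_offset 0, a partition starting at sector 1 sits
 * 512 bytes past a physical block boundary, so the function returns
 * (4096 + 0 - 512) % 4096 == 3584: the first naturally aligned boundary
 * lies 3584 bytes into the partition.  A start sector that lands exactly
 * on a boundary yields 0.
 */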
1482 
1483 static inline int bdev_alignment_offset(struct block_device *bdev)
1484 {
1485         struct request_queue *q = bdev_get_queue(bdev);
1486 
1487         if (q->limits.misaligned)
1488                 return -1;
1489 
1490         if (bdev != bdev->bd_contains)
1491                 return bdev->bd_part->alignment_offset;
1492 
1493         return q->limits.alignment_offset;
1494 }
1495 
1496 static inline int queue_discard_alignment(struct request_queue *q)
1497 {
1498         if (q->limits.discard_misaligned)
1499                 return -1;
1500 
1501         return q->limits.discard_alignment;
1502 }
1503 
1504 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1505 {
1506         unsigned int alignment, granularity, offset;
1507 
1508         if (!lim->max_discard_sectors)
1509                 return 0;
1510 
1511         /* Why are these in bytes, not sectors? */
1512         alignment = lim->discard_alignment >> 9;
1513         granularity = lim->discard_granularity >> 9;
1514         if (!granularity)
1515                 return 0;
1516 
1517         /* Offset of the partition start in 'granularity' sectors */
1518         offset = sector_div(sector, granularity);
1519 
1520         /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1521         offset = (granularity + alignment - offset) % granularity;
1522 
1523         /* Turn it back into bytes, gaah */
1524         return offset << 9;
1525 }
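/*
 * Worked example: with a 1 MiB discard_granularity (2048 sectors) and
 * discard_alignment 0, a partition starting at sector 2080 is 32 sectors
 * past a granule boundary, so the function computes
 * (2048 + 0 - 32) % 2048 == 2016 sectors and returns 1,032,192 bytes
 * (1008 KiB) after shifting back to bytes: the first discard-aligned
 * boundary lies that far into the partition.
 */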
1526 
1527 static inline int bdev_discard_alignment(struct block_device *bdev)
1528 {
1529         struct request_queue *q = bdev_get_queue(bdev);
1530 
1531         if (bdev != bdev->bd_contains)
1532                 return bdev->bd_part->discard_alignment;
1533 
1534         return q->limits.discard_alignment;
1535 }
1536 
1537 static inline unsigned int bdev_write_same(struct block_device *bdev)
1538 {
1539         struct request_queue *q = bdev_get_queue(bdev);
1540 
1541         if (q)
1542                 return q->limits.max_write_same_sectors;
1543 
1544         return 0;
1545 }
1546 
1547 static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1548 {
1549         struct request_queue *q = bdev_get_queue(bdev);
1550 
1551         if (q)
1552                 return q->limits.max_write_zeroes_sectors;
1553 
1554         return 0;
1555 }
1556 
1557 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
1558 {
1559         struct request_queue *q = bdev_get_queue(bdev);
1560 
1561         if (q)
1562                 return blk_queue_zoned_model(q);
1563 
1564         return BLK_ZONED_NONE;
1565 }
1566 
1567 static inline bool bdev_is_zoned(struct block_device *bdev)
1568 {
1569         struct request_queue *q = bdev_get_queue(bdev);
1570 
1571         if (q)
1572                 return blk_queue_is_zoned(q);
1573 
1574         return false;
1575 }
1576 
1577 static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1578 {
1579         struct request_queue *q = bdev_get_queue(bdev);
1580 
1581         if (q)
1582                 return blk_queue_zone_sectors(q);
1583 
1584         return 0;
1585 }
1586 
1587 static inline int queue_dma_alignment(struct request_queue *q)
1588 {
1589         return q ? q->dma_alignment : 511;
1590 }
1591 
1592 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1593                                  unsigned int len)
1594 {
1595         unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1596         return !(addr & alignment) && !(len & alignment);
1597 }
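/*
 * Example: with the default queue_dma_alignment() of 511 and no
 * dma_pad_mask, the combined mask is 511, so blk_rq_aligned() accepts
 * only buffers whose address and length are both multiples of 512: an
 * addr of 0x1000 with len 4096 passes, while len 4095 fails.
 */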
1598 
1599 /* assumes size > 256 */
1600 static inline unsigned int blksize_bits(unsigned int size)
1601 {
1602         unsigned int bits = 8;
1603         do {
1604                 bits++;
1605                 size >>= 1;
1606         } while (size > 256);
1607         return bits;
1608 }
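/*
 * Worked example: blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12; the result is 8 plus the number of right
 * shifts needed to bring the size down to 256 or below, which is why
 * sizes of 256 or less (disallowed above) would still report 9.
 */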
1609 
1610 static inline unsigned int block_size(struct block_device *bdev)
1611 {
1612         return bdev->bd_block_size;
1613 }
1614 
1615 static inline bool queue_flush_queueable(struct request_queue *q)
1616 {
1617         return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
1618 }
1619 
1620 typedef struct {struct page *v;} Sector;
1621 
1622 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1623 
1624 static inline void put_dev_sector(Sector p)
1625 {
1626         put_page(p.v);
1627 }
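/*
 * Example: partition-table code reads a 512-byte sector and must drop
 * the page reference with put_dev_sector() when done (a sketch):
 *
 *	Sector sect;
 *	unsigned char *data = read_dev_sector(bdev, 0, &sect);
 *
 *	if (data) {
 *		(inspect the 512 bytes at 'data')
 *		put_dev_sector(sect);
 *	}
 */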
1628 
1629 static inline bool __bvec_gap_to_prev(struct request_queue *q,
1630                                 struct bio_vec *bprv, unsigned int offset)
1631 {
1632         return offset ||
1633                 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1634 }
1635 
1636 /*
1637  * Check if adding a bio_vec after bprv with offset would create a gap in
1638  * the SG list. Most drivers don't care about this, but some do.
1639  */
1640 static inline bool bvec_gap_to_prev(struct request_queue *q,
1641                                 struct bio_vec *bprv, unsigned int offset)
1642 {
1643         if (!queue_virt_boundary(q))
1644                 return false;
1645         return __bvec_gap_to_prev(q, bprv, offset);
1646 }
1647 
1648 /*
1649  * Check if the two bvecs from two bios can be merged into one segment.
1650  * If so, there is no need to check for a gap between the two bios: the
1651  * last bvec of the 1st bio and the 1st bvec of the 2nd bio are handled
1652  * as one segment.
1653  */
1653 static inline bool bios_segs_mergeable(struct request_queue *q,
1654                 struct bio *prev, struct bio_vec *prev_last_bv,
1655                 struct bio_vec *next_first_bv)
1656 {
1657         if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
1658                 return false;
1659         if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
1660                 return false;
1661         if (prev->bi_seg_back_size + next_first_bv->bv_len >
1662                         queue_max_segment_size(q))
1663                 return false;
1664         return true;
1665 }
1666 
1667 static inline bool bio_will_gap(struct request_queue *q,
1668                                 struct request *prev_rq,
1669                                 struct bio *prev,
1670                                 struct bio *next)
1671 {
1672         if (bio_has_data(prev) && queue_virt_boundary(q)) {
1673                 struct bio_vec pb, nb;
1674 
1675                 /*
1676                  * Don't merge if the 1st bio starts at a non-zero
1677                  * offset; otherwise it is quite difficult to respect
1678                  * the sg gap limit.  We work hard to merge large
1679                  * numbers of small bios, e.g. those issued by mkfs.
1680                  */
1681                 if (prev_rq)
1682                         bio_get_first_bvec(prev_rq->bio, &pb);
1683                 else
1684                         bio_get_first_bvec(prev, &pb);
1685                 if (pb.bv_offset)
1686                         return true;
1687 
1688                 /*
1689                  * We don't need to worry about the merged segment
1690                  * ending on an unaligned virt boundary:
1691                  *
1692                  * - if 'pb' ends aligned, the merged segment ends aligned
1693                  * - if 'pb' ends unaligned, the next bio must include
1694                  *   the single bvec 'nb'; otherwise 'nb' can't be
1695                  *   merged with 'pb'
1696                  */
1697                 bio_get_last_bvec(prev, &pb);
1698                 bio_get_first_bvec(next, &nb);
1699 
1700                 if (!bios_segs_mergeable(q, prev, &pb, &nb))
1701                         return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1702         }
1703 
1704         return false;
1705 }
1706 
1707 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1708 {
1709         return bio_will_gap(req->q, req, req->biotail, bio);
1710 }
1711 
1712 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1713 {
1714         return bio_will_gap(req->q, NULL, bio, req->bio);
1715 }
1716 
1717 int kblockd_schedule_work(struct work_struct *work);
1718 int kblockd_schedule_work_on(int cpu, struct work_struct *work);
1719 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
1720 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1721 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1722 
1723 #ifdef CONFIG_BLK_CGROUP
1724 /*
1725  * This should not be using sched_clock().  A real patch is in progress
1726  * to fix this up; until that is in place we need to disable preemption
1727  * around sched_clock() in this function and in set_io_start_time_ns().
1728  */
1729 static inline void set_start_time_ns(struct request *req)
1730 {
1731         preempt_disable();
1732         req->start_time_ns = sched_clock();
1733         preempt_enable();
1734 }
1735 
1736 static inline void set_io_start_time_ns(struct request *req)
1737 {
1738         preempt_disable();
1739         req->io_start_time_ns = sched_clock();
1740         preempt_enable();
1741 }
1742 
1743 static inline uint64_t rq_start_time_ns(struct request *req)
1744 {
1745         return req->start_time_ns;
1746 }
1747 
1748 static inline uint64_t rq_io_start_time_ns(struct request *req)
1749 {
1750         return req->io_start_time_ns;
1751 }
1752 #else
1753 static inline void set_start_time_ns(struct request *req) {}
1754 static inline void set_io_start_time_ns(struct request *req) {}
1755 static inline uint64_t rq_start_time_ns(struct request *req)
1756 {
1757         return 0;
1758 }
1759 static inline uint64_t rq_io_start_time_ns(struct request *req)
1760 {
1761         return 0;
1762 }
1763 #endif
1764 
1765 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1766         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1767 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1768         MODULE_ALIAS("block-major-" __stringify(major) "-*")
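/*
 * Example: a driver owning major number 42 (hypothetical) can have the
 * module loader resolve any "block-major-42-*" device automatically:
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(42);
 *
 * which expands to MODULE_ALIAS("block-major-42-*"), so opening an
 * unbound device node with that major triggers a request_module() for
 * this alias.
 */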
1769 
1770 #if defined(CONFIG_BLK_DEV_INTEGRITY)
1771 
1772 enum blk_integrity_flags {
1773         BLK_INTEGRITY_VERIFY            = 1 << 0,
1774         BLK_INTEGRITY_GENERATE          = 1 << 1,
1775         BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
1776         BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
1777 };
1778 
1779 struct blk_integrity_iter {
1780         void                    *prot_buf;
1781         void                    *data_buf;
1782         sector_t                seed;
1783         unsigned int            data_size;
1784         unsigned short          interval;
1785         const char              *disk_name;
1786 };
1787 
1788 typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
1789 
1790 struct blk_integrity_profile {
1791         integrity_processing_fn         *generate_fn;
1792         integrity_processing_fn         *verify_fn;
1793         const char                      *name;
1794 };
1795 
1796 extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1797 extern void blk_integrity_unregister(struct gendisk *);
1798 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1799 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1800                                    struct scatterlist *);
1801 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1802 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1803                                    struct request *);
1804 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1805                                     struct bio *);
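/*
 * Example: a driver advertising T10 PI Type 1 protection registers an
 * integrity profile on its gendisk (a sketch; the field names follow
 * struct blk_integrity in genhd.h, and the stock profile and tuple type
 * come from <linux/t10-pi.h>):
 *
 *	struct blk_integrity bi = {
 *		.profile      = &t10_pi_type1_crc,
 *		.tuple_size   = sizeof(struct t10_pi_tuple),
 *		.interval_exp = ilog2(queue_logical_block_size(disk->queue)),
 *		.flags        = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 */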
1806 
1807 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1808 {
1809         struct blk_integrity *bi = &disk->queue->integrity;
1810 
1811         if (!bi->profile)
1812                 return NULL;
1813 
1814         return bi;
1815 }
1816 
1817 static inline
1818 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1819 {
1820         return blk_get_integrity(bdev->bd_disk);
1821 }
1822 
1823 static inline bool blk_integrity_rq(struct request *rq)
1824 {
1825         return rq->cmd_flags & REQ_INTEGRITY;
1826 }
1827 
1828 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1829                                                     unsigned int segs)
1830 {
1831         q->limits.max_integrity_segments = segs;
1832 }
1833 
1834 static inline unsigned short
1835 queue_max_integrity_segments(struct request_queue *q)
1836 {
1837         return q->limits.max_integrity_segments;
1838 }
1839 
1840 static inline bool integrity_req_gap_back_merge(struct request *req,
1841                                                 struct bio *next)
1842 {
1843         struct bio_integrity_payload *bip = bio_integrity(req->bio);
1844         struct bio_integrity_payload *bip_next = bio_integrity(next);
1845 
1846         return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1847                                 bip_next->bip_vec[0].bv_offset);
1848 }
1849 
1850 static inline bool integrity_req_gap_front_merge(struct request *req,
1851                                                  struct bio *bio)
1852 {
1853         struct bio_integrity_payload *bip = bio_integrity(bio);
1854         struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1855 
1856         return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1857                                 bip_next->bip_vec[0].bv_offset);
1858 }
1859 
1860 #else /* CONFIG_BLK_DEV_INTEGRITY */
1861 
1862 struct bio;
1863 struct block_device;
1864 struct gendisk;
1865 struct blk_integrity;
1866 
1867 static inline int blk_integrity_rq(struct request *rq)
1868 {
1869         return 0;
1870 }
1871 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1872                                             struct bio *b)
1873 {
1874         return 0;
1875 }
1876 static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1877                                           struct bio *b,
1878                                           struct scatterlist *s)
1879 {
1880         return 0;
1881 }
1882 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1883 {
1884         return NULL;
1885 }
1886 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1887 {
1888         return NULL;
1889 }
1890 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1891 {
1892         return 0;
1893 }
1894 static inline void blk_integrity_register(struct gendisk *d,
1895                                          struct blk_integrity *b)
1896 {
1897 }
1898 static inline void blk_integrity_unregister(struct gendisk *d)
1899 {
1900 }
1901 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1902                                                     unsigned int segs)
1903 {
1904 }
1905 static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1906 {
1907         return 0;
1908 }
1909 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1910                                           struct request *r1,
1911                                           struct request *r2)
1912 {
1913         return true;
1914 }
1915 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1916                                            struct request *r,
1917                                            struct bio *b)
1918 {
1919         return true;
1920 }
1921 
1922 static inline bool integrity_req_gap_back_merge(struct request *req,
1923                                                 struct bio *next)
1924 {
1925         return false;
1926 }
1927 static inline bool integrity_req_gap_front_merge(struct request *req,
1928                                                  struct bio *bio)
1929 {
1930         return false;
1931 }
1932 
1933 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1934 
1935 struct block_device_operations {
1936         int (*open) (struct block_device *, fmode_t);
1937         void (*release) (struct gendisk *, fmode_t);
1938         int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
1939         int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1940         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1941         unsigned int (*check_events) (struct gendisk *disk,
1942                                       unsigned int clearing);
1943         /* ->media_changed() is DEPRECATED, use ->check_events() instead */
1944         int (*media_changed) (struct gendisk *);
1945         void (*unlock_native_capacity) (struct gendisk *);
1946         int (*revalidate_disk) (struct gendisk *);
1947         int (*getgeo)(struct block_device *, struct hd_geometry *);
1948         /* this callback is with swap_lock and sometimes page table lock held */
1949         void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1950         struct module *owner;
1951         const struct pr_ops *pr_ops;
1952 };
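/*
 * Example: a minimal driver fills in only the hooks it needs and points
 * its gendisk at the table before add_disk() (a sketch; the mydrv_*
 * callbacks are hypothetical):
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = mydrv_open,
 *		.release = mydrv_release,
 *		.getgeo  = mydrv_getgeo,
 *	};
 *
 *	disk->fops = &mydrv_fops;
 */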
1953 
1954 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1955                                  unsigned long);
1956 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1957 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1958                                                 struct writeback_control *);
1959 #else /* CONFIG_BLOCK */
1960 
1961 struct block_device;
1962 
1963 /*
1964  * stubs for when the block layer is configured out
1965  */
1966 #define buffer_heads_over_limit 0
1967 
1968 static inline long nr_blockdev_pages(void)
1969 {
1970         return 0;
1971 }
1972 
1973 struct blk_plug {
1974 };
1975 
1976 static inline void blk_start_plug(struct blk_plug *plug)
1977 {
1978 }
1979 
1980 static inline void blk_finish_plug(struct blk_plug *plug)
1981 {
1982 }
1983 
1984 static inline void blk_flush_plug(struct task_struct *task)
1985 {
1986 }
1987 
1988 static inline void blk_schedule_flush_plug(struct task_struct *task)
1989 {
1990 }
1991 
1992 
1993 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1994 {
1995         return false;
1996 }
1997 
1998 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1999                                      sector_t *error_sector)
2000 {
2001         return 0;
2002 }
2003 
2004 #endif /* CONFIG_BLOCK */
2005 
2006 #endif
2007 
