Linux/include/linux/blk_types.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	int			bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	void *			bd_claiming;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;
	u8			bd_partno;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;
	int			bd_invalidated;
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
} __randomize_layout;
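
/*
 * Editor's example (not part of the original header): bd_dev is a plain
 * search key holding the device's major/minor numbers, which can be
 * unpacked with the MAJOR()/MINOR() helpers from <linux/kdev_t.h>. A
 * minimal sketch, assuming a valid block_device pointer:
 *
 *	#include <linux/kdev_t.h>
 *	#include <linux/printk.h>
 *
 *	static void example_print_devno(const struct block_device *bdev)
 *	{
 *		pr_info("device %u:%u\n",
 *			MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 *	}
 */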

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
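
/*
 * Editor's example (not part of the original header): a blk-mq driver's
 * ->queue_rq() handler might distinguish the two resource codes as the
 * comment above describes. A sketch, assuming hypothetical helpers
 * example_has_device_credits() (per-device resources returned by in-flight
 * IO), example_map_dma() (a system-wide resource) and
 * example_submit_to_hardware():
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *bd)
 *	{
 *		if (!example_has_device_credits(hctx))
 *			return BLK_STS_DEV_RESOURCE;	// queue rerun guaranteed
 *		if (!example_map_dma(bd->rq))
 *			return BLK_STS_RESOURCE;	// may need manual rerun
 *		example_submit_to_hardware(bd->rq);
 *		return BLK_STS_OK;
 *	}
 */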

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource, and IO to a different zone on the same device can
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read from the same zone could still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
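
/*
 * Editor's example (not part of the original header): a failover-aware
 * completion handler could use blk_path_error() to decide whether a retry
 * on another path is worthwhile. A sketch, with hypothetical
 * example_retry_other_path() and example_finish() helpers standing in for
 * consumer-specific logic:
 *
 *	static void example_end_io(struct bio *bio)
 *	{
 *		if (bio->bi_status && blk_path_error(bio->bi_status))
 *			example_retry_other_path(bio);
 *		else
 *			example_finish(bio);
 *	}
 */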

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
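
/*
 * Editor's example (not part of the original header): bio_issue_init()
 * packs the current ktime_get_ns() timestamp (truncated to 51 bits) and a
 * size (truncated to 12 bits) around the reserved top bit; the accessors
 * above unpack them again. A sketch of the round trip:
 *
 *	struct bio_issue issue = { .value = 0 };
 *
 *	bio_issue_init(&issue, 8);	// 8 sectors, fits in 12 bits
 *	// bio_issue_size(&issue) == 8
 *	// bio_issue_time(&issue) == low 51 bits of the init-time timestamp
 */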

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};
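
/*
 * Editor's example (not part of the original header): the remaining byte
 * count of a bio lives in bi_iter.bi_size, and the op/flags split of
 * bi_opf is decoded with the accessors defined later in this file. A
 * sketch:
 *
 *	static bool example_is_small_write(const struct bio *bio)
 *	{
 *		return op_is_write(bio->bi_opf) &&
 *			bio->bi_iter.bi_size <= PAGE_SIZE;
 *	}
 */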

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put/release the bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
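
/*
 * Editor's example (not part of the original header): because the stored
 * value is the pool index plus one, zero means there is nothing to free.
 * A sketch:
 *
 *	static bool example_bvecs_are_pool_backed(const struct bio *bio)
 *	{
 *		// pool index + 1 lives in the top 3 bits of bi_flags
 *		return BVEC_POOL_IDX(bio) != 0;
 *	}
 */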

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
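
/*
 * Editor's example (not part of the original header): with the direction
 * convention described above REQ_OP_MASK, op_is_write() is just a test of
 * bit 0 of the operation number:
 *
 *	op_is_write(REQ_OP_READ)	// false: 0, transfer FROM the device
 *	op_is_write(REQ_OP_WRITE)	// true:  1, transfer TO the device
 *	op_is_write(REQ_OP_DISCARD)	// true:  3, modifies the media
 */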

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
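	/* op_is_write() yields 0 or 1, matching STAT_READ and STAT_WRITE */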
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
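
/*
 * Editor's example (not part of the original header): a cookie packs the
 * hardware queue number above BLK_QC_T_SHIFT and the tag in the low bits,
 * with bit 31 marking internal tags. A sketch of the round trip for queue
 * 2, tag 5:
 *
 *	blk_qc_t cookie = (2 << BLK_QC_T_SHIFT) | 5;
 *
 *	// blk_qc_t_valid(cookie)		== true
 *	// blk_qc_t_to_queue_num(cookie)	== 2
 *	// blk_qc_t_to_tag(cookie)		== 5
 *	// blk_qc_t_is_internal(cookie)		== false
 */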

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */
