/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti,
				    struct block_device **bdev, fmode_t *mode);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
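/*
 * Illustrative sketch (not part of this header): a callout that reports
 * whether one underlying device supports flush, combined via
 * iterate_devices in the style dm core uses.  The helper
 * example_bdev_supports_flush() is a made-up stand-in for a real
 * capability check.
 *
 *	static int device_flush_capable(struct dm_target *ti,
 *					struct dm_dev *dev,
 *					sector_t start, sector_t len,
 *					void *data)
 *	{
 *		return example_bdev_supports_flush(dev->bdev);
 *	}
 *
 * Stopping at the first capable device:
 *
 *	if (ti->type->iterate_devices &&
 *	    ti->type->iterate_devices(ti, device_flush_capable, NULL))
 *		...at least one underlying device supports flush...
 */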
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};
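/*
 * Illustrative sketch (not part of this header): a minimal bio-based
 * target registration.  example_ctr, example_dtr and example_map are
 * hypothetical functions invented for this example; dm_register_target
 * and dm_unregister_target are declared further below.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 */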
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
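/*
 * Illustrative sketch (not part of this header): retrieving per-bio data
 * in an end_io hook.  struct example_per_bio_data is hypothetical; the
 * target's ctr is assumed to have set
 * ti->per_io_data_size = sizeof(struct example_per_bio_data) so that
 * dm core reserves the space in front of each clone bio.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error)
 *	{
 *		struct example_per_bio_data *pb =
 *			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));
 *
 *		...inspect or update *pb...
 *		return error;
 *	}
 */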
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments.  Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
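/*
 * Illustrative sketch (not part of this header): parsing "<dev> <count>"
 * inside a constructor.  The bounds, the error string and the ec->dev
 * destination are assumptions invented for this example.
 *
 *	static struct dm_arg _args[] = {
 *		{0, 1024, "count out of range"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path = dm_shift_arg(&as);
 *	unsigned count;
 *	int r;
 *
 *	r = dm_read_arg(_args, &as, &count, &ti->error);
 *	if (r)
 *		return r;	...ti->error already describes the problem...
 *
 *	r = dm_get_device(ti, dev_path, dm_table_get_mode(ti->table),
 *			  &ec->dev);
 */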
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * The target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
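/*
 * Illustrative sketch (not part of this header): a map function for a
 * hypothetical linear-style target.  struct example_ctx, with its dev
 * and start members, is invented for this example and assumed to have
 * been stored in ti->private by the constructor; dm_target_offset() is
 * defined below.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */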
#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */