/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page.  Instead, all the operations are amortised over the entire
 * range of pages.  It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes.  This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten).  Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on.  Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data.  If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
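
/*
 * Illustrative sketch, not part of the original file: iomap_apply() drives a
 * caller-supplied iomap_actor_t callback once per mapping returned by
 * ->iomap_begin.  The hypothetical actor below only demonstrates the calling
 * convention: consume up to @length bytes of the current mapping and report
 * how much was processed (or a negative errno).  Here it simply counts how
 * many bytes of the requested range are backed by allocated blocks.
 */
static loff_t
example_count_mapped_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	loff_t *mapped_bytes = data;	/* hypothetical caller-provided total */

	if (iomap->type == IOMAP_MAPPED)
		*mapped_bytes += length;

	/* Returning @length tells iomap_apply() the whole extent was handled. */
	return length;
}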

static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff))
			goto done;
		is_contig = true;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	__bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the portion of the file
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from
	 * it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page))) {
		copied = 0;
	} else {
		iomap_set_range_uptodate(page, offset_in_page(pos), len);
		iomap_set_page_dirty(page);
	}
	return __generic_write_end(inode, pos, copied, page);
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	if (iomap->page_done)
		iomap->page_done(inode, pos, copied, page, iomap);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
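
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->write_iter method typically takes the inode lock, runs the generic write
 * checks and then hands the iterator to iomap_file_buffered_write() together
 * with its own iomap_ops.  "example_iomap_ops" and "example_file_write_iter"
 * are hypothetical names; a real filesystem would fill in its
 * ->iomap_begin/->iomap_end callbacks.
 */
static const struct iomap_ops example_iomap_ops = { };	/* hypothetical */

static ssize_t
example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}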

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we
		 * found a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		flags |= REQ_HIPRI;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA.  This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO.  We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		if (dio->iocb->ki_flags & IOCB_HIPRI)
			bio->bi_opf |= REQ_HIPRI;

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write.  This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes.  In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three
	 * different ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
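	/*
	 * For example (illustrative numbers only): with 4096-byte pages, an
	 * extent with addr 0x1200 and length 0x3000 covers bytes
	 * 0x1200-0x41ff.  ALIGN(0x1200, 4096) = 0x2000 gives first_ppage = 2
	 * and ALIGN_DOWN(0x4200, 4096) = 0x4000 gives next_ppage = 4, so only
	 * the two fully covered pages (2 and 3) are offered to the swap code.
	 */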
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);