/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * wait for a page to finish being written to the cache.  Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
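
/*
 * Illustrative sketch only (not part of the original file): how a netfs
 * might wire the fscache_maybe_release_page() wrapper into its
 * ->releasepage() handler.  The example_* names and the cookie lookup are
 * hypothetical; the block is compiled out with #if 0.
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = example_cookie(page->mapping->host);

	/* refuse release while the cache still holds or is storing the page;
	 * fscache_maybe_release_page() may cancel a pending store for us */
	if (PageFsCache(page) &&
	    !fscache_maybe_release_page(cookie, page, gfp))
		return 0;

	return 1;
}
#endif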

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		put_page(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
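
/*
 * Illustrative sketch only (not part of the original file): a netfs would
 * typically call the fscache_attr_changed() wrapper after changing an
 * inode's size so the cache can adjust the backing object's reservation.
 * The example_* names are hypothetical; the block is compiled out with
 * #if 0.
 */
#if 0
static void example_setattr_done(struct inode *inode)
{
	struct fscache_cookie *cookie = example_cookie(inode);

	/* tell the cache that i_size changed; the store limit on the
	 * backing object is updated asynchronously */
	if (fscache_attr_changed(cookie) < 0)
		pr_debug("cache could not handle attr change\n");
}
#endif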

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie = cookie;
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
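
/*
 * Illustrative sketch only (not part of the original file): a netfs
 * ->readpage() path using the fscache_read_or_alloc_page() wrapper, falling
 * back to a server read on -ENODATA or -ENOBUFS.  The example_* names are
 * hypothetical; the block is compiled out with #if 0.
 */
#if 0
static void example_read_complete(struct page *page, void *context, int error)
{
	/* called once for each page the cache manages to read */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int example_readpage(struct file *file, struct page *page)
{
	struct fscache_cookie *cookie = example_cookie(page->mapping->host);
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page,
					 example_read_complete,
					 NULL, GFP_KERNEL);
	switch (ret) {
	case 0:		/* read dispatched; completion unlocks the page */
		return 0;
	case -ENOBUFS:	/* no cache space: read from the server, don't cache */
	case -ENODATA:	/* block allocated: read from the server, then store */
		return example_read_from_server(file, page);
	default:
		return ret;
	}
}
#endif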

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
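
/*
 * Illustrative sketch only (not part of the original file): the multi-page
 * variant as a netfs might use it from ->readpages().  Pages the cache
 * takes are removed from the list and *nr_pages is reduced, so whatever
 * remains must be read from the server.  Hypothetical example_* names
 * (reusing example_read_complete() from the sketch above); compiled out
 * with #if 0.
 */
#if 0
static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct fscache_cookie *cookie = example_cookie(mapping->host);
	int ret;

	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
					  example_read_complete,
					  NULL, GFP_KERNEL);
	switch (ret) {
	case 0:		/* reads dispatched on all the pages */
		return 0;
	case -ENOBUFS:
	case -ENODATA:	/* read the leftover pages from the server */
		return example_read_list_from_server(file, mapping,
						     pages, nr_pages);
	default:
		return ret;
	}
}
#endif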

/*
 * allocate a block in the cache in which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);
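
/*
 * Illustrative sketch only (not part of the original file): if a netfs
 * aborts a readahead after the cache has already marked some of the pages
 * (e.g. the server read fails), the fscache_readpages_cancel() wrapper
 * strips the PG_fscache marks again before the pages go back to the VM.
 * Hypothetical example_* names; compiled out with #if 0.
 */
#if 0
static void example_abort_readahead(struct fscache_cookie *cookie,
				    struct list_head *pages)
{
	/* unmark any pages still on the list */
	fscache_readpages_cancel(cookie, pages);
}
#endif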

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages we might write
		 * to the cache from no longer exist - therefore, we can just
		 * cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index >= op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	wake_up_bit(&cookie->flags, 0);

	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
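
/*
 * Illustrative sketch only (not part of the original file): how a netfs
 * might push a freshly-read page into the cache with the
 * fscache_write_page() wrapper, uncaching the page again if the store
 * cannot be queued.  Hypothetical example_* names; compiled out with #if 0.
 */
#if 0
static void example_cache_page(struct inode *inode, struct page *page)
{
	struct fscache_cookie *cookie = example_cookie(inode);
	int ret;

	ret = fscache_write_page(cookie, page, GFP_KERNEL);
	if (ret != 0) {
		/* -ENOMEM or -ENOBUFS: drop the PG_fscache mark so the page
		 * isn't left looking as though a store is pending */
		fscache_uncache_page(cookie, page);
	}
}
#endif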

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
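
/*
 * Illustrative sketch only (not part of the original file): a netfs might
 * call the fscache_uncache_all_inode_pages() wrapper while evicting an
 * inode, before relinquishing the cookie, so that in-flight stores are
 * waited out and every PG_fscache mark is dropped.  The example_* names
 * are hypothetical; compiled out with #if 0.
 */
#if 0
static void example_evict_inode(struct inode *inode)
{
	struct fscache_cookie *cookie = example_cookie(inode);

	/* wait out any in-flight stores and drop the PG_fscache marks */
	fscache_uncache_all_inode_pages(cookie, inode);
	fscache_relinquish_cookie(cookie, 0);
}
#endif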