/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();
	trace_fscache_check_page(cookie, page, val, 0);

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	trace_fscache_page(cookie, page, fscache_page_write_wait);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

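/*
 * Example (illustrative sketch, not taken from any particular netfs): a
 * network filesystem would normally call the fscache_check_page_write() and
 * fscache_wait_on_page_write() wrappers from linux/fscache.h rather than
 * these __-prefixed routines, e.g. in its invalidatepage() path before
 * discarding a page the cache may still be writing out:
 *
 *	static void mynetfs_invalidate_page(struct page *page,
 *					    unsigned int offset,
 *					    unsigned int length)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *	}
 *
 * mynetfs_page_cookie() is a hypothetical helper that maps a page to the
 * cookie of its inode.
 */
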
/*
 * wait for a page to finish being written to the cache.  Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	trace_fscache_page(cookie, page, fscache_page_maybe_release);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);
	if (xpage)
		put_page(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);

	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

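/*
 * Example (illustrative sketch): a netfs typically reaches this function
 * through the fscache_maybe_release_page() wrapper from its releasepage()
 * address-space operation, returning 0 to refuse release while a store to
 * the cache is still in progress:
 *
 *	static int mynetfs_release_page(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;
 *		return 1;
 *	}
 *
 * mynetfs_page_cookie() is again a hypothetical helper.
 */
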
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL, *val;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_write_end);

			val = radix_tree_lookup(&cookie->stores, page->index);
			trace_fscache_check_page(cookie, page, val, 1);
		} else {
			trace_fscache_page(cookie, page, fscache_page_write_end_pend);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
		trace_fscache_wake_cookie(cookie);
	} else {
		trace_fscache_page(cookie, page, fscache_page_write_end_noc);
	}
	spin_unlock(&object->lock);
	if (xpage)
		put_page(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
		fscache_op_complete(op, ret < 0);
	} else {
		fscache_op_complete(op, true);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
	trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

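/*
 * Example (illustrative sketch): a netfs would call the fscache_attr_changed()
 * wrapper after changing an attribute the cache cares about, typically the
 * file size, e.g. from its setattr handler:
 *
 *	static int mynetfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int ret;
 *
 *		ret = mynetfs_do_setattr(inode, attr);
 *		if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
 *			fscache_attr_changed(mynetfs_inode_cookie(inode));
 *		return ret;
 *	}
 *
 * mynetfs_do_setattr() and mynetfs_inode_cookie() are hypothetical helpers.
 */
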
/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(cookie, &op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie = cookie;
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		trace_fscache_op(object->cookie, op, fscache_op_signal);
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   -ENODATA - no data available in the backing object for this block
 *   0 - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

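/*
 * Example (illustrative sketch): a netfs readpage() implementation would
 * normally go through the fscache_read_or_alloc_page() wrapper, falling back
 * to a server read when the cache has nothing for this page:
 *
 *	static int mynetfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(mynetfs_inode_cookie(inode),
 *						 page,
 *						 mynetfs_read_complete, NULL,
 *						 GFP_KERNEL);
 *		switch (ret) {
 *		case 0:		// read dispatched; end_io_func will unlock
 *			return 0;
 *		case -ENOBUFS:	// no cache space - just read from the server
 *		case -ENODATA:	// block allocated, but no data in it yet
 *			return mynetfs_readpage_from_server(file, page);
 *		default:
 *			return ret;
 *		}
 *	}
 *
 * mynetfs_read_complete() and mynetfs_readpage_from_server() are hypothetical
 * helpers standing in for the netfs's own I/O paths.
 */
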
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS - no backing object or space available in which to cache any
 *     pages not being read
 *   -ENODATA - no data available in the backing object for some or all of
 *     the pages
 *   0 - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);
	trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

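/*
 * Example (illustrative sketch): a netfs readpages() implementation can hand
 * the whole list to the cache first and then fetch only the leftovers from
 * the server; pages the cache takes are removed from the list for it:
 *
 *	static int mynetfs_readpages(struct file *file,
 *				     struct address_space *mapping,
 *				     struct list_head *pages,
 *				     unsigned nr_pages)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_file_cookie(file);
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_pages(cookie, mapping, pages,
 *						  &nr_pages,
 *						  mynetfs_read_complete,
 *						  NULL, GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;	// the cache dispatched all the reads
 *
 *		// pages the cache took were unlinked from the list; fetch
 *		// the remainder from the server
 *		ret = mynetfs_read_from_server(file, mapping, pages, nr_pages);
 *		if (ret < 0)
 *			fscache_readpages_cancel(cookie, pages);
 *		return ret;
 *	}
 *
 * The helpers and error handling are simplified; see a real netfs such as
 * NFS for the full pattern.
 */
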
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   0 - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);
	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

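/*
 * Example (illustrative sketch): the fscache_alloc_page() wrapper for the
 * function above reserves a block without reading it, so that data the netfs
 * obtained by other means can subsequently be handed to fscache_write_page():
 *
 *	ret = fscache_alloc_page(cookie, page, GFP_KERNEL);
 *	if (ret == 0)
 *		fscache_write_page(cookie, page, i_size_read(inode),
 *				   GFP_KERNEL);
 *
 * Error handling is omitted here; -ENOBUFS simply means this page cannot be
 * cached and should be served from the netfs as usual.
 */
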
/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_read_or_alloc_pages) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages from which we
		 * might write to the cache no longer exist - therefore, we
		 * can just cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, true);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	results[0] = NULL;
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_pend2store);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	trace_fscache_wrote_page(cookie, page, &op->op, ret);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, false);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
			trace_fscache_page(cookie, page, fscache_page_radix_delete);
			trace_fscache_page(cookie, page, fscache_page_inval);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			put_page(results[i]);
	}

	wake_up_bit(&cookie->flags, 0);
	trace_fscache_wake_cookie(cookie);

	_leave("");
}

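/*
 * Note on the radix-tree tags used above: a page queued for storage sits in
 * cookie->stores with FSCACHE_COOKIE_PENDING_TAG set; fscache_write_op()
 * switches the tag to FSCACHE_COOKIE_STORING_TAG whilst the backend writes
 * the page out; and fscache_end_page_write() clears the storing tag and
 * removes the page from the tree, provided no new pending store has been
 * queued for it in the meantime.
 */
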
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 loff_t object_size,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	trace_fscache_page(cookie, page, fscache_page_write);

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	if (object->store_limit_l != object_size)
		fscache_set_store_limit(object, object_size);

	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	trace_fscache_page(cookie, page, fscache_page_radix_insert);
	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
	get_page(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;
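	/* The store limit snapshot taken above caps what the background
	 * writer will store: fscache_write_op() discards, rather than
	 * writes, any queued page whose index lies at or beyond it. */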

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	trace_fscache_page(cookie, page, fscache_page_radix_delete);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	put_page(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

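/*
 * Example (illustrative sketch): a netfs that caches clean pages would call
 * the fscache_write_page() wrapper once a page's data is valid, e.g. after a
 * successful read from the server, uncaching the page if the store cannot be
 * queued:
 *
 *	static void mynetfs_readpage_done(struct inode *inode,
 *					  struct page *page)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_inode_cookie(inode);
 *		int ret;
 *
 *		ret = fscache_write_page(cookie, page, i_size_read(inode),
 *					 GFP_KERNEL);
 *		if (ret != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 *
 * mynetfs_readpage_done() is hypothetical; NFS follows broadly this pattern
 * in its fscache glue.
 */
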
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	trace_fscache_page(cookie, page, fscache_page_uncache);

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	trace_fscache_page(cookie, page, fscache_page_cached);

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, &next))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);

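/*
 * Example (illustrative sketch): a netfs would typically call the
 * fscache_uncache_all_inode_pages() wrapper when tearing down caching for an
 * inode that may still have PG_fscache pages, before relinquishing the
 * cookie:
 *
 *	static void mynetfs_disable_caching(struct inode *inode)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_inode_cookie(inode);
 *
 *		fscache_uncache_all_inode_pages(cookie, inode);
 *		fscache_relinquish_cookie(cookie, NULL, false);
 *	}
 *
 * The cookie handling shown is schematic; real filesystems wrap this in
 * their own fscache helpers.
 */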