TOMOYO Linux Cross Reference
Linux/include/linux/pagemap.h

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _LINUX_PAGEMAP_H
  3 #define _LINUX_PAGEMAP_H
  4 
  5 /*
  6  * Copyright 1995 Linus Torvalds
  7  */
  8 #include <linux/mm.h>
  9 #include <linux/fs.h>
 10 #include <linux/list.h>
 11 #include <linux/highmem.h>
 12 #include <linux/compiler.h>
 13 #include <linux/uaccess.h>
 14 #include <linux/gfp.h>
 15 #include <linux/bitops.h>
 16 #include <linux/hardirq.h> /* for in_interrupt() */
 17 #include <linux/hugetlb_inline.h>
 18 
 19 struct pagevec;
 20 
 21 /*
 22  * Bits in mapping->flags.
 23  */
 24 enum mapping_flags {
 25         AS_EIO          = 0,    /* IO error on async write */
 26         AS_ENOSPC       = 1,    /* ENOSPC on async write */
 27         AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
 28         AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
 29         AS_EXITING      = 4,    /* final truncate in progress */
 30         /* writeback related tags are not used */
 31         AS_NO_WRITEBACK_TAGS = 5,
 32 };
 33 
 34 /**
 35  * mapping_set_error - record a writeback error in the address_space
 36  * @mapping: the mapping in which an error should be set
 37  * @error: the error to set in the mapping
 38  *
 39  * When writeback fails in some way, we must record that error so that
 40  * userspace can be informed when fsync and the like are called.  We endeavor
 41  * to report errors on any file that was open at the time of the error.  Some
 42  * internal callers also need to know when writeback errors have occurred.
 43  *
 44  * When a writeback error occurs, most filesystems will want to call
 45  * mapping_set_error to record the error in the mapping so that it can be
 46  * reported when the application calls fsync(2).
 47  */
 48 static inline void mapping_set_error(struct address_space *mapping, int error)
 49 {
 50         if (likely(!error))
 51                 return;
 52 
 53         /* Record in wb_err for checkers using errseq_t based tracking */
 54         filemap_set_wb_err(mapping, error);
 55 
 56         /* Record it in flags for now, for legacy callers */
 57         if (error == -ENOSPC)
 58                 set_bit(AS_ENOSPC, &mapping->flags);
 59         else
 60                 set_bit(AS_EIO, &mapping->flags);
 61 }
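/*
 * Illustrative example (editor's addition, not part of the original header):
 * a minimal sketch of how a filesystem's writeback completion path might
 * record a failure so that a later fsync(2) reports it.  The helper name
 * example_note_write_failure() is hypothetical.
 */
static inline void example_note_write_failure(struct address_space *mapping,
                                              struct page *page, int error)
{
        if (!error)
                return;

        SetPageError(page);                /* legacy per-page error bit */
        mapping_set_error(mapping, error); /* errseq_t plus AS_EIO/AS_ENOSPC */
}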
 62 
 63 static inline void mapping_set_unevictable(struct address_space *mapping)
 64 {
 65         set_bit(AS_UNEVICTABLE, &mapping->flags);
 66 }
 67 
 68 static inline void mapping_clear_unevictable(struct address_space *mapping)
 69 {
 70         clear_bit(AS_UNEVICTABLE, &mapping->flags);
 71 }
 72 
 73 static inline int mapping_unevictable(struct address_space *mapping)
 74 {
 75         if (mapping)
 76                 return test_bit(AS_UNEVICTABLE, &mapping->flags);
 77         return !!mapping;
 78 }
 79 
 80 static inline void mapping_set_exiting(struct address_space *mapping)
 81 {
 82         set_bit(AS_EXITING, &mapping->flags);
 83 }
 84 
 85 static inline int mapping_exiting(struct address_space *mapping)
 86 {
 87         return test_bit(AS_EXITING, &mapping->flags);
 88 }
 89 
 90 static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
 91 {
 92         set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
 93 }
 94 
 95 static inline int mapping_use_writeback_tags(struct address_space *mapping)
 96 {
 97         return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
 98 }
 99 
100 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
101 {
102         return mapping->gfp_mask;
103 }
104 
105 /* Restricts the given gfp_mask to what the mapping allows. */
106 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
107                 gfp_t gfp_mask)
108 {
109         return mapping_gfp_mask(mapping) & gfp_mask;
110 }
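/*
 * Illustrative example (editor's addition): a minimal sketch of using
 * mapping_gfp_constraint() to clamp an allocation to what the mapping
 * allows, while also dropping __GFP_FS for a filesystem-internal path.
 * The helper name is hypothetical.
 */
static inline struct page *example_alloc_page_nofs(struct address_space *mapping)
{
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);

        return alloc_pages(gfp, 0);
}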
111 
112 /*
113  * This is non-atomic.  Only to be used before the mapping is activated.
114  * Probably needs a barrier...
115  */
116 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
117 {
118         m->gfp_mask = mask;
119 }
120 
121 void release_pages(struct page **pages, int nr);
122 
123 /*
124  * speculatively take a reference to a page.
125  * If the page is free (_refcount == 0), then _refcount is untouched, and 0
126  * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
127  *
128  * This function must be called inside the same rcu_read_lock() section as has
129  * been used to lookup the page in the pagecache radix-tree (or page table):
130  * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
131  *
132  * Unless an RCU grace period has passed, the count of all pages coming out
133  * of the allocator must be considered unstable. page_count may return higher
134  * than expected, and put_page must be able to do the right thing when the
135  * page has been finished with, no matter what it is subsequently allocated
136  * for (because put_page is what is used here to drop an invalid speculative
137  * reference).
138  *
139  * This is the interesting part of the lockless pagecache (and lockless
140  * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
141  * has the following pattern:
142  * 1. find page in radix tree
143  * 2. conditionally increment refcount
144  * 3. check the page is still in pagecache (if no, goto 1)
145  *
146  * Remove-side that cares about stability of _refcount (eg. reclaim) has the
147  * following (with the i_pages lock held):
148  * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
149  * B. remove page from pagecache
150  * C. free the page
151  *
152  * There are 2 critical interleavings that matter:
153  * - 2 runs before A: in this case, A sees elevated refcount and bails out
154  * - A runs before 2: in this case, 2 sees zero refcount and retries;
155  *   subsequently, B will complete and 1 will find no page, causing the
156  *   lookup to return NULL.
157  *
158  * It is possible that between 1 and 2, the page is removed then the exact same
159  * page is inserted into the same position in pagecache. That's OK: the
160  * old find_get_page using a lock could equally have run before or after
161  * such a re-insertion, depending on order that locks are granted.
162  *
163  * Lookups racing against pagecache insertion aren't a big problem: either 1
164  * will find the page or it will not. Likewise, the old find_get_page could run
165  * either before the insertion or afterwards, depending on timing.
166  */
167 static inline int page_cache_get_speculative(struct page *page)
168 {
169 #ifdef CONFIG_TINY_RCU
170 # ifdef CONFIG_PREEMPT_COUNT
171         VM_BUG_ON(!in_atomic() && !irqs_disabled());
172 # endif
173         /*
174          * Preempt must be disabled here - we rely on rcu_read_lock doing
175          * this for us.
176          *
177          * Pagecache won't be truncated from interrupt context, so if we have
178          * found a page in the radix tree here, we have pinned its refcount by
179          * disabling preempt, and hence no need for the "speculative get" that
180          * SMP requires.
181          */
182         VM_BUG_ON_PAGE(page_count(page) == 0, page);
183         page_ref_inc(page);
184 
185 #else
186         if (unlikely(!get_page_unless_zero(page))) {
187                 /*
188                  * Either the page has been freed, or will be freed.
189                  * In either case, retry here and the caller should
190                  * do the right thing (see comments above).
191                  */
192                 return 0;
193         }
194 #endif
195         VM_BUG_ON_PAGE(PageTail(page), page);
196 
197         return 1;
198 }
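/*
 * Illustrative example (editor's addition): a simplified sketch of the
 * lookup-side pattern described above (find page, speculatively grab a
 * reference, re-check).  Real lookups in mm/filemap.c also handle
 * exceptional (shadow/swap) entries and use the radix-tree iterator
 * directly; both are omitted here, and the helper name is hypothetical.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
                                                   pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = radix_tree_lookup(&mapping->i_pages, offset);   /* step 1 */
        if (page) {
                if (!page_cache_get_speculative(page))          /* step 2 */
                        goto repeat;
                /* step 3: is the page still at this index? */
                if (unlikely(page != radix_tree_lookup(&mapping->i_pages,
                                                       offset))) {
                        put_page(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}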
199 
200 /*
201  * Same as above, but add instead of inc (could just be merged)
202  */
203 static inline int page_cache_add_speculative(struct page *page, int count)
204 {
205         VM_BUG_ON(in_interrupt());
206 
207 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
208 # ifdef CONFIG_PREEMPT_COUNT
209         VM_BUG_ON(!in_atomic() && !irqs_disabled());
210 # endif
211         VM_BUG_ON_PAGE(page_count(page) == 0, page);
212         page_ref_add(page, count);
213 
214 #else
215         if (unlikely(!page_ref_add_unless(page, count, 0)))
216                 return 0;
217 #endif
218         VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
219 
220         return 1;
221 }
222 
223 #ifdef CONFIG_NUMA
224 extern struct page *__page_cache_alloc(gfp_t gfp);
225 #else
226 static inline struct page *__page_cache_alloc(gfp_t gfp)
227 {
228         return alloc_pages(gfp, 0);
229 }
230 #endif
231 
232 static inline struct page *page_cache_alloc(struct address_space *x)
233 {
234         return __page_cache_alloc(mapping_gfp_mask(x));
235 }
236 
237 static inline gfp_t readahead_gfp_mask(struct address_space *x)
238 {
239         return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
240 }
241 
242 typedef int filler_t(void *, struct page *);
243 
244 pgoff_t page_cache_next_hole(struct address_space *mapping,
245                              pgoff_t index, unsigned long max_scan);
246 pgoff_t page_cache_prev_hole(struct address_space *mapping,
247                              pgoff_t index, unsigned long max_scan);
248 
249 #define FGP_ACCESSED            0x00000001
250 #define FGP_LOCK                0x00000002
251 #define FGP_CREAT               0x00000004
252 #define FGP_WRITE               0x00000008
253 #define FGP_NOFS                0x00000010
254 #define FGP_NOWAIT              0x00000020
255 
256 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
257                 int fgp_flags, gfp_t cache_gfp_mask);
258 
259 /**
260  * find_get_page - find and get a page reference
261  * @mapping: the address_space to search
262  * @offset: the page index
263  *
264  * Looks up the page cache slot at @mapping & @offset.  If there is a
265  * page cache page, it is returned with an increased refcount.
266  *
267  * Otherwise, %NULL is returned.
268  */
269 static inline struct page *find_get_page(struct address_space *mapping,
270                                         pgoff_t offset)
271 {
272         return pagecache_get_page(mapping, offset, 0, 0);
273 }
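/*
 * Illustrative example (editor's addition): a typical caller of
 * find_get_page().  The returned reference must be dropped with put_page().
 * The helper name is hypothetical.
 */
static inline bool example_page_is_cached(struct address_space *mapping,
                                          pgoff_t offset)
{
        struct page *page = find_get_page(mapping, offset);

        if (!page)
                return false;
        put_page(page);
        return true;
}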
274 
275 static inline struct page *find_get_page_flags(struct address_space *mapping,
276                                         pgoff_t offset, int fgp_flags)
277 {
278         return pagecache_get_page(mapping, offset, fgp_flags, 0);
279 }
280 
281 /**
282  * find_lock_page - locate, pin and lock a pagecache page
283  * @mapping: the address_space to search
284  * @offset: the page index
285  *
286  * Looks up the page cache slot at @mapping & @offset.  If there is a
287  * page cache page, it is returned locked and with an increased
288  * refcount.
289  *
290  * Otherwise, %NULL is returned.
291  *
292  * find_lock_page() may sleep.
293  */
294 static inline struct page *find_lock_page(struct address_space *mapping,
295                                         pgoff_t offset)
296 {
297         return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
298 }
299 
300 /**
301  * find_or_create_page - locate or add a pagecache page
302  * @mapping: the page's address_space
303  * @index: the page's index into the mapping
304  * @gfp_mask: page allocation mode
305  *
306  * Looks up the page cache slot at @mapping & @index.  If there is a
307  * page cache page, it is returned locked and with an increased
308  * refcount.
309  *
310  * If the page is not present, a new page is allocated using @gfp_mask
311  * and added to the page cache and the VM's LRU list.  The page is
312  * returned locked and with an increased refcount.
313  *
314  * On memory exhaustion, %NULL is returned.
315  *
316  * find_or_create_page() may sleep, even if @gfp_mask specifies an
317  * atomic allocation!
318  */
319 static inline struct page *find_or_create_page(struct address_space *mapping,
320                                         pgoff_t offset, gfp_t gfp_mask)
321 {
322         return pagecache_get_page(mapping, offset,
323                                         FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
324                                         gfp_mask);
325 }
326 
327 /**
328  * grab_cache_page_nowait - returns locked page at given index in given cache
329  * @mapping: target address_space
330  * @index: the page index
331  *
332  * Same as grab_cache_page(), but do not wait if the page is unavailable.
333  * This is intended for speculative data generators, where the data can
334  * be regenerated if the page couldn't be grabbed.  This routine should
335  * be safe to call while holding the lock for another page.
336  *
337  * Clear __GFP_FS when allocating the page to avoid recursion into the fs
338  * and deadlock against the caller's locked page.
339  */
340 static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
341                                 pgoff_t index)
342 {
343         return pagecache_get_page(mapping, index,
344                         FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
345                         mapping_gfp_mask(mapping));
346 }
347 
348 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
349 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
350 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
351                           unsigned int nr_entries, struct page **entries,
352                           pgoff_t *indices);
353 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
354                         pgoff_t end, unsigned int nr_pages,
355                         struct page **pages);
356 static inline unsigned find_get_pages(struct address_space *mapping,
357                         pgoff_t *start, unsigned int nr_pages,
358                         struct page **pages)
359 {
360         return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
361                                     pages);
362 }
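/*
 * Illustrative example (editor's addition): a simple gang-lookup loop over a
 * range using find_get_pages_range(), which updates *start as it goes.  A
 * small on-stack array stands in for the usual pagevec batching; the helper
 * name is hypothetical.
 */
static inline unsigned long example_count_cached_pages(struct address_space *mapping,
                                                       pgoff_t start, pgoff_t end)
{
        struct page *pages[16];
        unsigned long count = 0;
        unsigned int nr;

        do {
                unsigned int i;

                nr = find_get_pages_range(mapping, &start, end, 16, pages);
                count += nr;
                for (i = 0; i < nr; i++)
                        put_page(pages[i]);     /* drop the lookup references */
        } while (nr);

        return count;
}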
363 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
364                                unsigned int nr_pages, struct page **pages);
365 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
366                         pgoff_t end, int tag, unsigned int nr_pages,
367                         struct page **pages);
368 static inline unsigned find_get_pages_tag(struct address_space *mapping,
369                         pgoff_t *index, int tag, unsigned int nr_pages,
370                         struct page **pages)
371 {
372         return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
373                                         nr_pages, pages);
374 }
375 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
376                         int tag, unsigned int nr_entries,
377                         struct page **entries, pgoff_t *indices);
378 
379 struct page *grab_cache_page_write_begin(struct address_space *mapping,
380                         pgoff_t index, unsigned flags);
381 
382 /*
383  * Returns locked page at given index in given cache, creating it if needed.
384  */
385 static inline struct page *grab_cache_page(struct address_space *mapping,
386                                                                 pgoff_t index)
387 {
388         return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
389 }
390 
391 extern struct page * read_cache_page(struct address_space *mapping,
392                                 pgoff_t index, filler_t *filler, void *data);
393 extern struct page * read_cache_page_gfp(struct address_space *mapping,
394                                 pgoff_t index, gfp_t gfp_mask);
395 extern int read_cache_pages(struct address_space *mapping,
396                 struct list_head *pages, filler_t *filler, void *data);
397 
398 static inline struct page *read_mapping_page(struct address_space *mapping,
399                                 pgoff_t index, void *data)
400 {
401         filler_t *filler = (filler_t *)mapping->a_ops->readpage;
402         return read_cache_page(mapping, index, filler, data);
403 }
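/*
 * Illustrative example (editor's addition): typical read_mapping_page() use
 * in the style of simple filesystems' directory code - read (or find) the
 * page, then kmap() it.  The caller must kunmap() and put_page() when done.
 * The helper name is hypothetical.
 */
static inline void *example_get_mapping_page(struct address_space *mapping,
                                             pgoff_t n, struct page **pagep)
{
        struct page *page = read_mapping_page(mapping, n, NULL);

        if (IS_ERR(page))
                return page;    /* ERR_PTR(); caller checks with IS_ERR() */
        *pagep = page;
        return kmap(page);
}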
404 
405 /*
406  * Get the index of the page within the radix tree
407  * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
408  */
409 static inline pgoff_t page_to_index(struct page *page)
410 {
411         pgoff_t pgoff;
412 
413         if (likely(!PageTransTail(page)))
414                 return page->index;
415 
416         /*
417          *  We don't initialize ->index for tail pages: calculate based on
418          *  head page
419          */
420         pgoff = compound_head(page)->index;
421         pgoff += page - compound_head(page);
422         return pgoff;
423 }
424 
425 /*
426  * Get the offset in PAGE_SIZE.
427  * (TODO: hugepage should have ->index in PAGE_SIZE)
428  */
429 static inline pgoff_t page_to_pgoff(struct page *page)
430 {
431         if (unlikely(PageHeadHuge(page)))
432                 return page->index << compound_order(page);
433 
434         return page_to_index(page);
435 }
436 
437 /*
438  * Return byte-offset into filesystem object for page.
439  */
440 static inline loff_t page_offset(struct page *page)
441 {
442         return ((loff_t)page->index) << PAGE_SHIFT;
443 }
444 
445 static inline loff_t page_file_offset(struct page *page)
446 {
447         return ((loff_t)page_index(page)) << PAGE_SHIFT;
448 }
449 
450 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
451                                      unsigned long address);
452 
453 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
454                                         unsigned long address)
455 {
456         pgoff_t pgoff;
457         if (unlikely(is_vm_hugetlb_page(vma)))
458                 return linear_hugepage_index(vma, address);
459         pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
460         pgoff += vma->vm_pgoff;
461         return pgoff;
462 }
463 
464 extern void __lock_page(struct page *page);
465 extern int __lock_page_killable(struct page *page);
466 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
467                                 unsigned int flags);
468 extern void unlock_page(struct page *page);
469 
470 static inline int trylock_page(struct page *page)
471 {
472         page = compound_head(page);
473         return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
474 }
475 
476 /*
477  * lock_page may only be called if we have the page's inode pinned.
478  */
479 static inline void lock_page(struct page *page)
480 {
481         might_sleep();
482         if (!trylock_page(page))
483                 __lock_page(page);
484 }
485 
486 /*
487  * lock_page_killable is like lock_page but can be interrupted by fatal
488  * signals.  It returns 0 if it locked the page and -EINTR if it was
489  * killed while waiting.
490  */
491 static inline int lock_page_killable(struct page *page)
492 {
493         might_sleep();
494         if (!trylock_page(page))
495                 return __lock_page_killable(page);
496         return 0;
497 }
498 
499 /*
500  * lock_page_or_retry - Lock the page, unless this would block and the
501  * caller indicated that it can handle a retry.
502  *
503  * Return value and mmap_sem implications depend on flags; see
504  * __lock_page_or_retry().
505  */
506 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
507                                      unsigned int flags)
508 {
509         might_sleep();
510         return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
511 }
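/*
 * Illustrative example (editor's addition): the usual lock/modify/unlock
 * sequence around a pagecache page, combining find_lock_page() with the
 * helpers above.  The helper name is hypothetical.
 */
static inline int example_dirty_cached_page(struct address_space *mapping,
                                            pgoff_t offset)
{
        struct page *page = find_lock_page(mapping, offset);

        if (!page)
                return -ENOENT;

        /* locked and referenced: safe to inspect/modify the page here */
        set_page_dirty(page);

        unlock_page(page);
        put_page(page);
        return 0;
}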
512 
513 /*
514  * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
515  * and should not be used directly.
516  */
517 extern void wait_on_page_bit(struct page *page, int bit_nr);
518 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
519 
520 /* 
521  * Wait for a page to be unlocked.
522  *
523  * This must be called with the caller "holding" the page,
 524  * i.e. with an increased "page->count" so that the page won't
 525  * go away during the wait.
526  */
527 static inline void wait_on_page_locked(struct page *page)
528 {
529         if (PageLocked(page))
530                 wait_on_page_bit(compound_head(page), PG_locked);
531 }
532 
533 static inline int wait_on_page_locked_killable(struct page *page)
534 {
535         if (!PageLocked(page))
536                 return 0;
537         return wait_on_page_bit_killable(compound_head(page), PG_locked);
538 }
539 
540 /* 
541  * Wait for a page to complete writeback
542  */
543 static inline void wait_on_page_writeback(struct page *page)
544 {
545         if (PageWriteback(page))
546                 wait_on_page_bit(page, PG_writeback);
547 }
548 
549 extern void end_page_writeback(struct page *page);
550 void wait_for_stable_page(struct page *page);
551 
552 void page_endio(struct page *page, bool is_write, int err);
553 
554 /*
555  * Add an arbitrary waiter to a page's wait queue
556  */
557 extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
558 
559 /*
560  * Fault everything in given userspace address range in.
561  */
562 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
563 {
564         char __user *end = uaddr + size - 1;
565 
566         if (unlikely(size == 0))
567                 return 0;
568 
569         if (unlikely(uaddr > end))
570                 return -EFAULT;
571         /*
572          * Writing zeroes into userspace here is OK, because we know that if
573          * the zero gets there, we'll be overwriting it.
574          */
575         do {
576                 if (unlikely(__put_user(0, uaddr) != 0))
577                         return -EFAULT;
578                 uaddr += PAGE_SIZE;
579         } while (uaddr <= end);
580 
581         /* Check whether the range spilled into the next page. */
582         if (((unsigned long)uaddr & PAGE_MASK) ==
583                         ((unsigned long)end & PAGE_MASK))
584                 return __put_user(0, end);
585 
586         return 0;
587 }
588 
589 static inline int fault_in_pages_readable(const char __user *uaddr, int size)
590 {
591         volatile char c;
592         const char __user *end = uaddr + size - 1;
593 
594         if (unlikely(size == 0))
595                 return 0;
596 
597         if (unlikely(uaddr > end))
598                 return -EFAULT;
599 
600         do {
601                 if (unlikely(__get_user(c, uaddr) != 0))
602                         return -EFAULT;
603                 uaddr += PAGE_SIZE;
604         } while (uaddr <= end);
605 
606         /* Check whether the range spilled into the next page. */
607         if (((unsigned long)uaddr & PAGE_MASK) ==
608                         ((unsigned long)end & PAGE_MASK)) {
609                 return __get_user(c, end);
610         }
611 
612         (void)c;
613         return 0;
614 }
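/*
 * Illustrative example (editor's addition): the deadlock-avoidance pattern
 * used by buffered write paths (compare generic_perform_write() in
 * mm/filemap.c) - pre-fault the source buffer before locking the destination
 * pagecache page, because faulting while holding that page lock could
 * deadlock on the very page being written.  The pre-fault is only best
 * effort, so callers still check the copy result and retry.  The helper name
 * is hypothetical.
 */
static inline int example_prefault_write_source(const char __user *buf, int count)
{
        if (unlikely(fault_in_pages_readable(buf, count)))
                return -EFAULT;
        return 0;
}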
615 
616 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
617                                 pgoff_t index, gfp_t gfp_mask);
618 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
619                                 pgoff_t index, gfp_t gfp_mask);
620 extern void delete_from_page_cache(struct page *page);
621 extern void __delete_from_page_cache(struct page *page, void *shadow);
622 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
623 void delete_from_page_cache_batch(struct address_space *mapping,
624                                   struct pagevec *pvec);
625 
626 /*
627  * Like add_to_page_cache_locked, but used to add newly allocated pages:
628  * the page is new, so we can just run __SetPageLocked() against it.
629  */
630 static inline int add_to_page_cache(struct page *page,
631                 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
632 {
633         int error;
634 
635         __SetPageLocked(page);
636         error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
637         if (unlikely(error))
638                 __ClearPageLocked(page);
639         return error;
640 }
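/*
 * Illustrative example (editor's addition): how a brand new page usually
 * enters the pagecache - allocate, then add_to_page_cache_lru(), which
 * returns the page locked and on the LRU so the caller can read it in and
 * unlock it.  The helper name is hypothetical and error handling is
 * simplified.
 */
static inline struct page *example_add_new_cache_page(struct address_space *mapping,
                                                      pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);

        if (!page)
                return NULL;

        if (add_to_page_cache_lru(page, mapping, index, gfp)) {
                /* someone may have instantiated @index concurrently */
                put_page(page);
                return NULL;
        }

        return page;    /* locked; caller fills it, sets it uptodate, unlocks */
}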
641 
642 static inline unsigned long dir_pages(struct inode *inode)
643 {
644         return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
645                                PAGE_SHIFT;
646 }
647 
648 #endif /* _LINUX_PAGEMAP_H */
649 
