TOMOYO Linux Cross Reference
Linux/include/linux/pagemap.h

  1 #ifndef _LINUX_PAGEMAP_H
  2 #define _LINUX_PAGEMAP_H
  3 
  4 /*
  5  * Copyright 1995 Linus Torvalds
  6  */
  7 #include <linux/mm.h>
  8 #include <linux/fs.h>
  9 #include <linux/list.h>
 10 #include <linux/highmem.h>
 11 #include <linux/compiler.h>
 12 #include <asm/uaccess.h>
 13 #include <linux/gfp.h>
 14 #include <linux/bitops.h>
 15 #include <linux/hardirq.h> /* for in_interrupt() */
 16 #include <linux/hugetlb_inline.h>
 17 
 18 /*
 19  * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 20  * allocation mode flags.
 21  */
 22 enum mapping_flags {
 23         AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
 24         AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
 25         AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
 26         AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
 27         AS_EXITING      = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
 28 };
 29 
 30 static inline void mapping_set_error(struct address_space *mapping, int error)
 31 {
 32         if (unlikely(error)) {
 33                 if (error == -ENOSPC)
 34                         set_bit(AS_ENOSPC, &mapping->flags);
 35                 else
 36                         set_bit(AS_EIO, &mapping->flags);
 37         }
 38 }
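/*
 * For illustration, a minimal sketch of how an asynchronous writeback
 * completion path might use this.  The handler below and its caller are
 * hypothetical; mapping_set_error() and end_page_writeback() are the
 * real APIs being demonstrated:
 *
 *	static void example_write_endio(struct page *page, int error)
 *	{
 *		if (error)
 *			mapping_set_error(page->mapping, error);
 *		end_page_writeback(page);
 *	}
 *
 * A later fsync() on the file then finds AS_EIO/AS_ENOSPC set on the
 * mapping and reports -EIO or -ENOSPC to userspace.
 */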
 39 
 40 static inline void mapping_set_unevictable(struct address_space *mapping)
 41 {
 42         set_bit(AS_UNEVICTABLE, &mapping->flags);
 43 }
 44 
 45 static inline void mapping_clear_unevictable(struct address_space *mapping)
 46 {
 47         clear_bit(AS_UNEVICTABLE, &mapping->flags);
 48 }
 49 
 50 static inline int mapping_unevictable(struct address_space *mapping)
 51 {
 52         if (mapping)
 53                 return test_bit(AS_UNEVICTABLE, &mapping->flags);
 54         return !!mapping;
 55 }
 56 
 57 static inline void mapping_set_exiting(struct address_space *mapping)
 58 {
 59         set_bit(AS_EXITING, &mapping->flags);
 60 }
 61 
 62 static inline int mapping_exiting(struct address_space *mapping)
 63 {
 64         return test_bit(AS_EXITING, &mapping->flags);
 65 }
 66 
 67 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 68 {
 69         return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
 70 }
 71 
 72 /* Restricts the given gfp_mask to what the mapping allows. */
 73 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
 74                 gfp_t gfp_mask)
 75 {
 76         return mapping_gfp_mask(mapping) & gfp_mask;
 77 }
 78 
 79 /*
 80  * This is non-atomic.  Only to be used before the mapping is activated.
 81  * Probably needs a barrier...
 82  */
 83 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 84 {
 85         m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
 86                                 (__force unsigned long)mask;
 87 }
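/*
 * For illustration, a minimal sketch of the common setup pattern: a
 * filesystem that must not recurse into itself during reclaim can drop
 * __GFP_FS from its mapping's allocation mask when the inode is set up.
 * The surrounding function is hypothetical:
 *
 *	static void example_init_mapping(struct inode *inode)
 *	{
 *		struct address_space *mapping = inode->i_mapping;
 *
 *		mapping_set_gfp_mask(mapping,
 *			mapping_gfp_constraint(mapping, ~__GFP_FS));
 *	}
 */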
 88 
 89 void release_pages(struct page **pages, int nr, bool cold);
 90 
 91 /*
 92  * Speculatively take a reference to a page.
 93  * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 94  * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 95  *
 96  * This function must be called inside the same rcu_read_lock() section as has
 97  * been used to lookup the page in the pagecache radix-tree (or page table):
 98  * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 99  *
100  * Unless an RCU grace period has passed, the count of all pages coming out
101  * of the allocator must be considered unstable. page_count may return higher
102  * than expected, and put_page must be able to do the right thing when the
103  * page has been finished with, no matter what it is subsequently allocated
104  * for (because put_page is what is used here to drop an invalid speculative
105  * reference).
106  *
107  * This is the interesting part of the lockless pagecache (and lockless
108  * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
109  * has the following pattern:
110  * 1. find page in radix tree
111  * 2. conditionally increment refcount
112  * 3. check the page is still in pagecache (if no, goto 1)
113  *
114  * Remove-side that cares about stability of _refcount (eg. reclaim) has the
115  * following (with tree_lock held for write):
116  * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
117  * B. remove page from pagecache
118  * C. free the page
119  *
120  * There are 2 critical interleavings that matter:
121  * - 2 runs before A: in this case, A sees elevated refcount and bails out
122  * - A runs before 2: in this case, 2 sees zero refcount and retries;
123  *   subsequently, B will complete and 1 will find no page, causing the
124  *   lookup to return NULL.
125  *
126  * It is possible that between 1 and 2, the page is removed then the exact same
127  * page is inserted into the same position in pagecache. That's OK: the
128  * old find_get_page using tree_lock could equally have run before or after
129  * such a re-insertion, depending on order that locks are granted.
130  *
131  * Lookups racing against pagecache insertion aren't a big problem: either 1
132  * will find the page or it will not. Likewise, the old find_get_page could run
133  * either before the insertion or afterwards, depending on timing.
134  */
135 static inline int page_cache_get_speculative(struct page *page)
136 {
137         VM_BUG_ON(in_interrupt());
138 
139 #ifdef CONFIG_TINY_RCU
140 # ifdef CONFIG_PREEMPT_COUNT
141         VM_BUG_ON(!in_atomic());
142 # endif
143         /*
144          * Preempt must be disabled here - we rely on rcu_read_lock doing
145          * this for us.
146          *
147          * Pagecache won't be truncated from interrupt context, so if we have
148          * found a page in the radix tree here, we have pinned its refcount by
149          * disabling preempt, and hence no need for the "speculative get" that
150          * SMP requires.
151          */
152         VM_BUG_ON_PAGE(page_count(page) == 0, page);
153         page_ref_inc(page);
154 
155 #else
156         if (unlikely(!get_page_unless_zero(page))) {
157                 /*
158                  * Either the page has been freed, or will be freed.
159                  * In either case, retry here and the caller should
160                  * do the right thing (see comments above).
161                  */
162                 return 0;
163         }
164 #endif
165         VM_BUG_ON_PAGE(PageTail(page), page);
166 
167         return 1;
168 }
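/*
 * A simplified sketch of the lookup-side protocol (steps 1-3 described
 * above); the real code is find_get_entry() in mm/filemap.c, which also
 * handles exceptional entries and radix_tree_deref_retry():
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */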
169 
170 /*
171  * Same as above, but add instead of inc (could just be merged)
172  */
173 static inline int page_cache_add_speculative(struct page *page, int count)
174 {
175         VM_BUG_ON(in_interrupt());
176 
177 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
178 # ifdef CONFIG_PREEMPT_COUNT
179         VM_BUG_ON(!in_atomic());
180 # endif
181         VM_BUG_ON_PAGE(page_count(page) == 0, page);
182         page_ref_add(page, count);
183 
184 #else
185         if (unlikely(!page_ref_add_unless(page, count, 0)))
186                 return 0;
187 #endif
188         VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
189 
190         return 1;
191 }
192 
193 #ifdef CONFIG_NUMA
194 extern struct page *__page_cache_alloc(gfp_t gfp);
195 #else
196 static inline struct page *__page_cache_alloc(gfp_t gfp)
197 {
198         return alloc_pages(gfp, 0);
199 }
200 #endif
201 
202 static inline struct page *page_cache_alloc(struct address_space *x)
203 {
204         return __page_cache_alloc(mapping_gfp_mask(x));
205 }
206 
207 static inline struct page *page_cache_alloc_cold(struct address_space *x)
208 {
209         return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
210 }
211 
212 static inline gfp_t readahead_gfp_mask(struct address_space *x)
213 {
214         return mapping_gfp_mask(x) |
215                                   __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
216 }
217 
218 typedef int filler_t(void *, struct page *);
219 
220 pgoff_t page_cache_next_hole(struct address_space *mapping,
221                              pgoff_t index, unsigned long max_scan);
222 pgoff_t page_cache_prev_hole(struct address_space *mapping,
223                              pgoff_t index, unsigned long max_scan);
224 
225 #define FGP_ACCESSED            0x00000001
226 #define FGP_LOCK                0x00000002
227 #define FGP_CREAT               0x00000004
228 #define FGP_WRITE               0x00000008
229 #define FGP_NOFS                0x00000010
230 #define FGP_NOWAIT              0x00000020
231 
232 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
233                 int fgp_flags, gfp_t cache_gfp_mask);
234 
235 /**
236  * find_get_page - find and get a page reference
237  * @mapping: the address_space to search
238  * @offset: the page index
239  *
240  * Looks up the page cache slot at @mapping & @offset.  If there is a
241  * page cache page, it is returned with an increased refcount.
242  *
243  * Otherwise, %NULL is returned.
244  */
245 static inline struct page *find_get_page(struct address_space *mapping,
246                                         pgoff_t offset)
247 {
248         return pagecache_get_page(mapping, offset, 0, 0);
249 }
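/*
 * Typical caller pattern, as a minimal sketch: the reference taken by
 * find_get_page() must be dropped with put_page() once the caller is
 * done with the page.
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */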
250 
251 static inline struct page *find_get_page_flags(struct address_space *mapping,
252                                         pgoff_t offset, int fgp_flags)
253 {
254         return pagecache_get_page(mapping, offset, fgp_flags, 0);
255 }
256 
257 /**
258  * find_lock_page - locate, pin and lock a pagecache page
260  * @mapping: the address_space to search
261  * @offset: the page index
262  *
263  * Looks up the page cache slot at @mapping & @offset.  If there is a
264  * page cache page, it is returned locked and with an increased
265  * refcount.
266  *
267  * Otherwise, %NULL is returned.
268  *
269  * find_lock_page() may sleep.
270  */
271 static inline struct page *find_lock_page(struct address_space *mapping,
272                                         pgoff_t offset)
273 {
274         return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
275 }
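/*
 * A minimal sketch of the usual caller pattern: because find_lock_page()
 * may sleep for the lock, the page can be truncated in the meantime, so
 * callers commonly re-check page->mapping once they hold the lock:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page && page->mapping != mapping) {
 *		unlock_page(page);
 *		put_page(page);
 *		page = NULL;
 *	}
 */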
276 
277 /**
278  * find_or_create_page - locate or add a pagecache page
279  * @mapping: the page's address_space
280  * @offset: the page's index into the mapping
281  * @gfp_mask: page allocation mode
282  *
283  * Looks up the page cache slot at @mapping & @offset.  If there is a
284  * page cache page, it is returned locked and with an increased
285  * refcount.
286  *
287  * If the page is not present, a new page is allocated using @gfp_mask
288  * and added to the page cache and the VM's LRU list.  The page is
289  * returned locked and with an increased refcount.
290  *
291  * On memory exhaustion, %NULL is returned.
292  *
293  * find_or_create_page() may sleep, even if @gfp_mask specifies an
294  * atomic allocation!
295  */
296 static inline struct page *find_or_create_page(struct address_space *mapping,
297                                         pgoff_t offset, gfp_t gfp_mask)
298 {
299         return pagecache_get_page(mapping, offset,
300                                         FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
301                                         gfp_mask);
302 }
303 
304 /**
305  * grab_cache_page_nowait - returns locked page at given index in given cache
306  * @mapping: target address_space
307  * @index: the page index
308  *
309  * Same as grab_cache_page(), but do not wait if the page is unavailable.
310  * This is intended for speculative data generators, where the data can
311  * be regenerated if the page couldn't be grabbed.  This routine should
312  * be safe to call while holding the lock for another page.
313  *
314  * Clear __GFP_FS when allocating the page to avoid recursion into the fs
315  * and deadlock against the caller's locked page.
316  */
317 static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
318                                 pgoff_t index)
319 {
320         return pagecache_get_page(mapping, index,
321                         FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
322                         mapping_gfp_mask(mapping));
323 }
324 
325 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
326 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
327 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
328                           unsigned int nr_entries, struct page **entries,
329                           pgoff_t *indices);
330 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
331                         unsigned int nr_pages, struct page **pages);
332 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
333                                unsigned int nr_pages, struct page **pages);
334 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
335                         int tag, unsigned int nr_pages, struct page **pages);
336 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
337                         int tag, unsigned int nr_entries,
338                         struct page **entries, pgoff_t *indices);
339 
340 struct page *grab_cache_page_write_begin(struct address_space *mapping,
341                         pgoff_t index, unsigned flags);
342 
343 /*
344  * Returns locked page at given index in given cache, creating it if needed.
345  */
346 static inline struct page *grab_cache_page(struct address_space *mapping,
347                                                                 pgoff_t index)
348 {
349         return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
350 }
351 
352 extern struct page * read_cache_page(struct address_space *mapping,
353                                 pgoff_t index, filler_t *filler, void *data);
354 extern struct page * read_cache_page_gfp(struct address_space *mapping,
355                                 pgoff_t index, gfp_t gfp_mask);
356 extern int read_cache_pages(struct address_space *mapping,
357                 struct list_head *pages, filler_t *filler, void *data);
358 
359 static inline struct page *read_mapping_page(struct address_space *mapping,
360                                 pgoff_t index, void *data)
361 {
362         filler_t *filler = (filler_t *)mapping->a_ops->readpage;
363         return read_cache_page(mapping, index, filler, data);
364 }
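/*
 * Illustrative use: read_cache_page()/read_mapping_page() return an
 * ERR_PTR() on failure and an uptodate, unlocked page otherwise, so a
 * typical caller (hypothetical here) looks like:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	put_page(page);
 */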
365 
366 /*
367  * Get the index of the page within the radix tree
368  * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
369  */
370 static inline pgoff_t page_to_index(struct page *page)
371 {
372         pgoff_t pgoff;
373 
374         if (likely(!PageTransTail(page)))
375                 return page->index;
376 
377         /*
378          *  We don't initialize ->index for tail pages: calculate based on
379          *  head page
380          */
381         pgoff = compound_head(page)->index;
382         pgoff += page - compound_head(page);
383         return pgoff;
384 }
385 
386 /*
387  * Get the page's offset into the file, in units of PAGE_SIZE.
388  * (TODO: hugepage should have ->index in PAGE_SIZE)
389  */
390 static inline pgoff_t page_to_pgoff(struct page *page)
391 {
392         if (unlikely(PageHeadHuge(page)))
393                 return page->index << compound_order(page);
394 
395         return page_to_index(page);
396 }
397 
398 /*
399  * Return byte-offset into filesystem object for page.
400  */
401 static inline loff_t page_offset(struct page *page)
402 {
403         return ((loff_t)page->index) << PAGE_SHIFT;
404 }
405 
406 static inline loff_t page_file_offset(struct page *page)
407 {
408         return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
409 }
410 
411 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
412                                      unsigned long address);
413 
414 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
415                                         unsigned long address)
416 {
417         pgoff_t pgoff;
418         if (unlikely(is_vm_hugetlb_page(vma)))
419                 return linear_hugepage_index(vma, address);
420         pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
421         pgoff += vma->vm_pgoff;
422         return pgoff;
423 }
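/*
 * Worked example with made-up numbers (assuming 4K pages): for a VMA
 * with vm_start == 0x7f0000000000 and vm_pgoff == 16, a fault at
 * address 0x7f0000003000 gives
 *
 *	pgoff = (0x3000 >> PAGE_SHIFT) + 16 = 3 + 16 = 19
 *
 * i.e. the faulting address is backed by page 19 of the mapped file.
 */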
424 
425 extern void __lock_page(struct page *page);
426 extern int __lock_page_killable(struct page *page);
427 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
428                                 unsigned int flags);
429 extern void unlock_page(struct page *page);
430 
431 static inline int trylock_page(struct page *page)
432 {
433         page = compound_head(page);
434         return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
435 }
436 
437 /*
438  * lock_page may only be called if we have the page's inode pinned.
439  */
440 static inline void lock_page(struct page *page)
441 {
442         might_sleep();
443         if (!trylock_page(page))
444                 __lock_page(page);
445 }
446 
447 /*
448  * lock_page_killable is like lock_page but can be interrupted by fatal
449  * signals.  It returns 0 if it locked the page and -EINTR if it was
450  * killed while waiting.
451  */
452 static inline int lock_page_killable(struct page *page)
453 {
454         might_sleep();
455         if (!trylock_page(page))
456                 return __lock_page_killable(page);
457         return 0;
458 }
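/*
 * A minimal usage sketch (the surrounding error handling is
 * hypothetical): callers that can be interrupted by a fatal signal use
 * the killable variant and propagate its return value:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		put_page(page);
 *		return error;
 *	}
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */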
459 
460 /*
461  * lock_page_or_retry - Lock the page, unless this would block and the
462  * caller indicated that it can handle a retry.
463  *
464  * Return value and mmap_sem implications depend on flags; see
465  * __lock_page_or_retry().
466  */
467 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
468                                      unsigned int flags)
469 {
470         might_sleep();
471         return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
472 }
473 
474 /*
475  * This is exported only for wait_on_page_locked/wait_on_page_writeback,
476  * and for filesystems which need to wait on PG_private.
477  */
478 extern void wait_on_page_bit(struct page *page, int bit_nr);
479 
480 extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
481 extern int wait_on_page_bit_killable_timeout(struct page *page,
482                                              int bit_nr, unsigned long timeout);
483 
484 static inline int wait_on_page_locked_killable(struct page *page)
485 {
486         if (!PageLocked(page))
487                 return 0;
488         return wait_on_page_bit_killable(compound_head(page), PG_locked);
489 }
490 
491 extern wait_queue_head_t *page_waitqueue(struct page *page);
492 static inline void wake_up_page(struct page *page, int bit)
493 {
494         __wake_up_bit(page_waitqueue(page), &page->flags, bit);
495 }
496 
497 /* 
498  * Wait for a page to be unlocked.
499  *
500  * This must be called with the caller "holding" the page,
501  * i.e. with an increased "page->count" so that the page won't
502  * go away during the wait.
503  */
504 static inline void wait_on_page_locked(struct page *page)
505 {
506         if (PageLocked(page))
507                 wait_on_page_bit(compound_head(page), PG_locked);
508 }
509 
510 /* 
511  * Wait for a page to complete writeback
512  */
513 static inline void wait_on_page_writeback(struct page *page)
514 {
515         if (PageWriteback(page))
516                 wait_on_page_bit(page, PG_writeback);
517 }
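/*
 * A minimal sketch of the usual ordering (the caller code is
 * hypothetical): take the page lock first, then wait for any writeback
 * in flight, so the page is stable before it is modified again:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	... modify the page ...
 *	set_page_dirty(page);
 *	unlock_page(page);
 */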
518 
519 extern void end_page_writeback(struct page *page);
520 void wait_for_stable_page(struct page *page);
521 
522 void page_endio(struct page *page, bool is_write, int err);
523 
524 /*
525  * Add an arbitrary waiter to a page's wait queue
526  */
527 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
528 
529 /*
530  * Fault one or two userspace pages into pagetables.
531  * Return -EINVAL if more than two pages would be needed.
532  * Return non-zero on a fault.
533  */
534 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
535 {
536         int span, ret;
537 
538         if (unlikely(size == 0))
539                 return 0;
540 
541         span = offset_in_page(uaddr) + size;
542         if (span > 2 * PAGE_SIZE)
543                 return -EINVAL;
544         /*
545          * Writing zeroes into userspace here is OK, because we know that if
546          * the zero gets there, we'll be overwriting it.
547          */
548         ret = __put_user(0, uaddr);
549         if (ret == 0 && span > PAGE_SIZE)
550                 ret = __put_user(0, uaddr + size - 1);
551         return ret;
552 }
553 
554 static inline int fault_in_pages_readable(const char __user *uaddr, int size)
555 {
556         volatile char c;
557         int ret;
558 
559         if (unlikely(size == 0))
560                 return 0;
561 
562         ret = __get_user(c, uaddr);
563         if (ret == 0) {
564                 const char __user *end = uaddr + size - 1;
565 
566                 if (((unsigned long)uaddr & PAGE_MASK) !=
567                                 ((unsigned long)end & PAGE_MASK)) {
568                         ret = __get_user(c, end);
569                         (void)c;
570                 }
571         }
572         return ret;
573 }
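/*
 * A simplified sketch of how the buffered write path uses these helpers
 * (see generic_perform_write() in mm/filemap.c for the real thing):
 * fault the user buffer in before taking the page lock, then do the
 * copy with page faults disabled so it cannot recurse on the lock:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	page = grab_cache_page_write_begin(mapping, index, 0);
 *	...
 *	kaddr = kmap_atomic(page);
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	kunmap_atomic(kaddr);
 */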
574 
575 /*
576  * Multipage variants of the above prefault helpers, useful if more than
577  * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
578  * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
579  * filemap.c hotpaths.
580  */
581 static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
582 {
583         char __user *end = uaddr + size - 1;
584 
585         if (unlikely(size == 0))
586                 return 0;
587 
588         if (unlikely(uaddr > end))
589                 return -EFAULT;
590         /*
591          * Writing zeroes into userspace here is OK, because we know that if
592          * the zero gets there, we'll be overwriting it.
593          */
594         do {
595                 if (unlikely(__put_user(0, uaddr) != 0))
596                         return -EFAULT;
597                 uaddr += PAGE_SIZE;
598         } while (uaddr <= end);
599 
600         /* Check whether the range spilled into the next page. */
601         if (((unsigned long)uaddr & PAGE_MASK) ==
602                         ((unsigned long)end & PAGE_MASK))
603                 return __put_user(0, end);
604 
605         return 0;
606 }
607 
608 static inline int fault_in_multipages_readable(const char __user *uaddr,
609                                                int size)
610 {
611         volatile char c;
612         const char __user *end = uaddr + size - 1;
613 
614         if (unlikely(size == 0))
615                 return 0;
616 
617         if (unlikely(uaddr > end))
618                 return -EFAULT;
619 
620         do {
621                 if (unlikely(__get_user(c, uaddr) != 0))
622                         return -EFAULT;
623                 uaddr += PAGE_SIZE;
624         } while (uaddr <= end);
625 
626         /* Check whether the range spilled into the next page. */
627         if (((unsigned long)uaddr & PAGE_MASK) ==
628                         ((unsigned long)end & PAGE_MASK)) {
629                 return __get_user(c, end);
630         }
631 
632         (void)c;
633         return 0;
634 }
635 
636 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
637                                 pgoff_t index, gfp_t gfp_mask);
638 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
639                                 pgoff_t index, gfp_t gfp_mask);
640 extern void delete_from_page_cache(struct page *page);
641 extern void __delete_from_page_cache(struct page *page, void *shadow);
642 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
643 
644 /*
645  * Like add_to_page_cache_locked, but used to add newly allocated pages:
646  * the page is new, so we can just run __SetPageLocked() against it.
647  */
648 static inline int add_to_page_cache(struct page *page,
649                 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
650 {
651         int error;
652 
653         __SetPageLocked(page);
654         error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
655         if (unlikely(error))
656                 __ClearPageLocked(page);
657         return error;
658 }
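/*
 * A simplified sketch of the usual population path for a missing page
 * (compare do_read_cache_page() in mm/filemap.c); "file", "index" and
 * the error handling belong to the hypothetical caller:
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (error) {
 *		put_page(page);
 *		return error;
 *	}
 *	error = mapping->a_ops->readpage(file, page);
 */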
659 
660 static inline unsigned long dir_pages(struct inode *inode)
661 {
662         return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
663                                PAGE_SHIFT;
664 }
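/*
 * Worked example (assuming 4K pages): for a directory inode with
 * i_size == 10000, dir_pages() returns (10000 + 4095) >> 12 == 3,
 * i.e. the directory data spans three page-cache pages, the last of
 * which is only partially used.
 */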
665 
666 #endif /* _LINUX_PAGEMAP_H */
667 
