TOMOYO Linux Cross Reference
Linux/fs/gfs2/aops.c

  1 /*
  2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
  3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
  4  *
  5  * This copyrighted material is made available to anyone wishing to use,
  6  * modify, copy, or redistribute it subject to the terms and conditions
  7  * of the GNU General Public License version 2.
  8  */
  9 
 10 #include <linux/sched.h>
 11 #include <linux/slab.h>
 12 #include <linux/spinlock.h>
 13 #include <linux/completion.h>
 14 #include <linux/buffer_head.h>
 15 #include <linux/pagemap.h>
 16 #include <linux/pagevec.h>
 17 #include <linux/mpage.h>
 18 #include <linux/fs.h>
 19 #include <linux/writeback.h>
 20 #include <linux/swap.h>
 21 #include <linux/gfs2_ondisk.h>
 22 #include <linux/backing-dev.h>
 23 #include <linux/uio.h>
 24 #include <trace/events/writeback.h>
 25 
 26 #include "gfs2.h"
 27 #include "incore.h"
 28 #include "bmap.h"
 29 #include "glock.h"
 30 #include "inode.h"
 31 #include "log.h"
 32 #include "meta_io.h"
 33 #include "quota.h"
 34 #include "trans.h"
 35 #include "rgrp.h"
 36 #include "super.h"
 37 #include "util.h"
 38 #include "glops.h"
 39 
 40 
 41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
 42                                    unsigned int from, unsigned int to)
 43 {
 44         struct buffer_head *head = page_buffers(page);
 45         unsigned int bsize = head->b_size;
 46         struct buffer_head *bh;
 47         unsigned int start, end;
 48 
 49         for (bh = head, start = 0; bh != head || !start;
 50              bh = bh->b_this_page, start = end) {
 51                 end = start + bsize;
 52                 if (end <= from || start >= to)
 53                         continue;
 54                 if (gfs2_is_jdata(ip))
 55                         set_buffer_uptodate(bh);
 56                 gfs2_trans_add_data(ip->i_gl, bh);
 57         }
 58 }
 59 
 60 /**
 61  * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 62  * @inode: The inode
 63  * @lblock: The block number to look up
 64  * @bh_result: The buffer head to return the result in
  65  * @create: Non-zero if we may add a block to the file
 66  *
 67  * Returns: errno
 68  */
 69 
 70 static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 71                                   struct buffer_head *bh_result, int create)
 72 {
 73         int error;
 74 
 75         error = gfs2_block_map(inode, lblock, bh_result, 0);
 76         if (error)
 77                 return error;
 78         if (!buffer_mapped(bh_result))
 79                 return -EIO;
 80         return 0;
 81 }
 82 
 83 static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
 84                                  struct buffer_head *bh_result, int create)
 85 {
 86         return gfs2_block_map(inode, lblock, bh_result, 0);
 87 }
 88 
 89 /**
 90  * gfs2_writepage_common - Common bits of writepage
 91  * @page: The page to be written
 92  * @wbc: The writeback control
 93  *
  94  * Returns: 1 if the page should be written; otherwise 0 (the page was handled here) or an error code.
 95  */
 96 
 97 static int gfs2_writepage_common(struct page *page,
 98                                  struct writeback_control *wbc)
 99 {
100         struct inode *inode = page->mapping->host;
101         struct gfs2_inode *ip = GFS2_I(inode);
102         struct gfs2_sbd *sdp = GFS2_SB(inode);
103         loff_t i_size = i_size_read(inode);
104         pgoff_t end_index = i_size >> PAGE_SHIFT;
105         unsigned offset;
106 
107         if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
108                 goto out;
109         if (current->journal_info)
110                 goto redirty;
111         /* Is the page fully outside i_size? (truncate in progress) */
112         offset = i_size & (PAGE_SIZE-1);
113         if (page->index > end_index || (page->index == end_index && !offset)) {
114                 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
115                 goto out;
116         }
117         return 1;
118 redirty:
119         redirty_page_for_writepage(wbc, page);
120 out:
121         unlock_page(page);
122         return 0;
123 }
124 
125 /**
126  * gfs2_writepage - Write page for writeback mappings
127  * @page: The page
128  * @wbc: The writeback control
129  *
130  */
131 
132 static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
133 {
134         int ret;
135 
136         ret = gfs2_writepage_common(page, wbc);
137         if (ret <= 0)
138                 return ret;
139 
140         return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
141 }
142 
143 /* This is the same as calling block_write_full_page, but it also
144  * writes pages outside of i_size
145  */
146 static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
147                                 struct writeback_control *wbc)
148 {
149         struct inode * const inode = page->mapping->host;
150         loff_t i_size = i_size_read(inode);
151         const pgoff_t end_index = i_size >> PAGE_SHIFT;
152         unsigned offset;
153 
154         /*
155          * The page straddles i_size.  It must be zeroed out on each and every
156          * writepage invocation because it may be mmapped.  "A file is mapped
157          * in multiples of the page size.  For a file that is not a multiple of
158          * the  page size, the remaining memory is zeroed when mapped, and
159          * writes to that region are not written out to the file."
160          */
161         offset = i_size & (PAGE_SIZE-1);
162         if (page->index == end_index && offset)
163                 zero_user_segment(page, offset, PAGE_SIZE);
164 
165         return __block_write_full_page(inode, page, get_block, wbc,
166                                        end_buffer_async_write);
167 }
168 
169 /**
170  * __gfs2_jdata_writepage - The core of jdata writepage
171  * @page: The page to write
172  * @wbc: The writeback control
173  *
174  * This is shared between writepage and writepages and implements the
175  * core of the writepage operation. If a transaction is required then
176  * PageChecked will have been set and the transaction will have
177  * already been started before this is called.
178  */
179 
180 static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
181 {
182         struct inode *inode = page->mapping->host;
183         struct gfs2_inode *ip = GFS2_I(inode);
184         struct gfs2_sbd *sdp = GFS2_SB(inode);
185 
186         if (PageChecked(page)) {
187                 ClearPageChecked(page);
188                 if (!page_has_buffers(page)) {
189                         create_empty_buffers(page, inode->i_sb->s_blocksize,
190                                              BIT(BH_Dirty)|BIT(BH_Uptodate));
191                 }
192                 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
193         }
194         return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
195 }
196 
197 /**
198  * gfs2_jdata_writepage - Write complete page
199  * @page: Page to write
200  * @wbc: The writeback control
201  *
202  * Returns: errno
203  *
204  */
205 
206 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
207 {
208         struct inode *inode = page->mapping->host;
209         struct gfs2_inode *ip = GFS2_I(inode);
210         struct gfs2_sbd *sdp = GFS2_SB(inode);
211         int ret;
212 
213         if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
214                 goto out;
215         if (PageChecked(page) || current->journal_info)
216                 goto out_ignore;
217         ret = __gfs2_jdata_writepage(page, wbc);
218         return ret;
219 
220 out_ignore:
221         redirty_page_for_writepage(wbc, page);
222 out:
223         unlock_page(page);
224         return 0;
225 }
226 
227 /**
228  * gfs2_writepages - Write a bunch of dirty pages back to disk
229  * @mapping: The mapping to write
230  * @wbc: Write-back control
231  *
232  * Used for both ordered and writeback modes.
233  */
234 static int gfs2_writepages(struct address_space *mapping,
235                            struct writeback_control *wbc)
236 {
237         return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
238 }
239 
240 /**
241  * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
242  * @mapping: The mapping
243  * @wbc: The writeback control
244  * @pvec: The vector of pages
245  * @nr_pages: The number of pages to write
246  * @end: End position
247  * @done_index: Page index
248  *
249  * Returns: non-zero if loop should terminate, zero otherwise
250  */
251 
252 static int gfs2_write_jdata_pagevec(struct address_space *mapping,
253                                     struct writeback_control *wbc,
254                                     struct pagevec *pvec,
255                                     int nr_pages, pgoff_t end,
256                                     pgoff_t *done_index)
257 {
258         struct inode *inode = mapping->host;
259         struct gfs2_sbd *sdp = GFS2_SB(inode);
260         unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
261         int i;
262         int ret;
263 
264         ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
265         if (ret < 0)
266                 return ret;
267 
268         for(i = 0; i < nr_pages; i++) {
269                 struct page *page = pvec->pages[i];
270 
271                 /*
272                  * At this point, the page may be truncated or
273                  * invalidated (changing page->mapping to NULL), or
274                  * even swizzled back from swapper_space to tmpfs file
275                  * mapping. However, page->index will not change
276                  * because we have a reference on the page.
277                  */
278                 if (page->index > end) {
279                         /*
280                          * can't be range_cyclic (1st pass) because
281                          * end == -1 in that case.
282                          */
283                         ret = 1;
284                         break;
285                 }
286 
287                 *done_index = page->index;
288 
289                 lock_page(page);
290 
291                 if (unlikely(page->mapping != mapping)) {
292 continue_unlock:
293                         unlock_page(page);
294                         continue;
295                 }
296 
297                 if (!PageDirty(page)) {
298                         /* someone wrote it for us */
299                         goto continue_unlock;
300                 }
301 
302                 if (PageWriteback(page)) {
303                         if (wbc->sync_mode != WB_SYNC_NONE)
304                                 wait_on_page_writeback(page);
305                         else
306                                 goto continue_unlock;
307                 }
308 
309                 BUG_ON(PageWriteback(page));
310                 if (!clear_page_dirty_for_io(page))
311                         goto continue_unlock;
312 
313                 trace_wbc_writepage(wbc, inode_to_bdi(inode));
314 
315                 ret = __gfs2_jdata_writepage(page, wbc);
316                 if (unlikely(ret)) {
317                         if (ret == AOP_WRITEPAGE_ACTIVATE) {
318                                 unlock_page(page);
319                                 ret = 0;
320                         } else {
321 
322                                 /*
323                                  * done_index is set past this page,
324                                  * so media errors will not choke
325                                  * background writeout for the entire
326                                  * file. This has consequences for
 327                                  * range_cyclic semantics (i.e. it may
328                                  * not be suitable for data integrity
329                                  * writeout).
330                                  */
331                                 *done_index = page->index + 1;
332                                 ret = 1;
333                                 break;
334                         }
335                 }
336 
337                 /*
338                  * We stop writing back only if we are not doing
339                  * integrity sync. In case of integrity sync we have to
340                  * keep going until we have written all the pages
341                  * we tagged for writeback prior to entering this loop.
342                  */
343                 if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
344                         ret = 1;
345                         break;
346                 }
347 
348         }
349         gfs2_trans_end(sdp);
350         return ret;
351 }
352 
353 /**
354  * gfs2_write_cache_jdata - Like write_cache_pages but different
355  * @mapping: The mapping to write
356  * @wbc: The writeback control
357  *
358  * The reason that we use our own function here is that we need to
359  * start transactions before we grab page locks. This allows us
360  * to get the ordering right.
361  */
362 
363 static int gfs2_write_cache_jdata(struct address_space *mapping,
364                                   struct writeback_control *wbc)
365 {
366         int ret = 0;
367         int done = 0;
368         struct pagevec pvec;
369         int nr_pages;
370         pgoff_t uninitialized_var(writeback_index);
371         pgoff_t index;
372         pgoff_t end;
373         pgoff_t done_index;
374         int cycled;
375         int range_whole = 0;
376         int tag;
377 
378         pagevec_init(&pvec, 0);
379         if (wbc->range_cyclic) {
380                 writeback_index = mapping->writeback_index; /* prev offset */
381                 index = writeback_index;
382                 if (index == 0)
383                         cycled = 1;
384                 else
385                         cycled = 0;
386                 end = -1;
387         } else {
388                 index = wbc->range_start >> PAGE_SHIFT;
389                 end = wbc->range_end >> PAGE_SHIFT;
390                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
391                         range_whole = 1;
392                 cycled = 1; /* ignore range_cyclic tests */
393         }
394         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
395                 tag = PAGECACHE_TAG_TOWRITE;
396         else
397                 tag = PAGECACHE_TAG_DIRTY;
398 
399 retry:
400         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
401                 tag_pages_for_writeback(mapping, index, end);
402         done_index = index;
403         while (!done && (index <= end)) {
404                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
405                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
406                 if (nr_pages == 0)
407                         break;
408 
409                 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
410                 if (ret)
411                         done = 1;
412                 if (ret > 0)
413                         ret = 0;
414                 pagevec_release(&pvec);
415                 cond_resched();
416         }
417 
418         if (!cycled && !done) {
419                 /*
420                  * range_cyclic:
421                  * We hit the last page and there is more work to be done: wrap
422                  * back to the start of the file
423                  */
424                 cycled = 1;
425                 index = 0;
426                 end = writeback_index - 1;
427                 goto retry;
428         }
429 
430         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
431                 mapping->writeback_index = done_index;
432 
433         return ret;
434 }
435 
436 
437 /**
438  * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
439  * @mapping: The mapping to write
440  * @wbc: The writeback control
441  * 
442  */
443 
444 static int gfs2_jdata_writepages(struct address_space *mapping,
445                                  struct writeback_control *wbc)
446 {
447         struct gfs2_inode *ip = GFS2_I(mapping->host);
448         struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
449         int ret;
450 
451         ret = gfs2_write_cache_jdata(mapping, wbc);
452         if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
453                 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
454                 ret = gfs2_write_cache_jdata(mapping, wbc);
455         }
456         return ret;
457 }
458 
459 /**
460  * stuffed_readpage - Fill in a Linux page with stuffed file data
461  * @ip: the inode
462  * @page: the page
463  *
464  * Returns: errno
465  */
466 
467 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
468 {
469         struct buffer_head *dibh;
470         u64 dsize = i_size_read(&ip->i_inode);
471         void *kaddr;
472         int error;
473 
474         /*
475          * Due to the order of unstuffing files and ->fault(), we can be
476          * asked for a zero page in the case of a stuffed file being extended,
477          * so we need to supply one here. It doesn't happen often.
478          */
479         if (unlikely(page->index)) {
480                 zero_user(page, 0, PAGE_SIZE);
481                 SetPageUptodate(page);
482                 return 0;
483         }
484 
485         error = gfs2_meta_inode_buffer(ip, &dibh);
486         if (error)
487                 return error;
488 
489         kaddr = kmap_atomic(page);
490         if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
491                 dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
492         memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
493         memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
494         kunmap_atomic(kaddr);
495         flush_dcache_page(page);
496         brelse(dibh);
497         SetPageUptodate(page);
498 
499         return 0;
500 }
501 
502 
503 /**
504  * __gfs2_readpage - readpage
505  * @file: The file to read a page for
506  * @page: The page to read
507  *
 508  * This is the core of gfs2's readpage. It's used by the internal file
 509  * reading code, as in that case we already hold the glock. It's also
 510  * called by gfs2_readpage() once the required lock has been granted.
511  *
512  */
513 
514 static int __gfs2_readpage(void *file, struct page *page)
515 {
516         struct gfs2_inode *ip = GFS2_I(page->mapping->host);
517         struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
518         int error;
519 
520         if (gfs2_is_stuffed(ip)) {
521                 error = stuffed_readpage(ip, page);
522                 unlock_page(page);
523         } else {
524                 error = mpage_readpage(page, gfs2_block_map);
525         }
526 
527         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
528                 return -EIO;
529 
530         return error;
531 }
532 
533 /**
534  * gfs2_readpage - read a page of a file
535  * @file: The file to read
536  * @page: The page of the file
537  *
538  * This deals with the locking required. We have to unlock and
539  * relock the page in order to get the locking in the right
540  * order.
541  */
542 
543 static int gfs2_readpage(struct file *file, struct page *page)
544 {
545         struct address_space *mapping = page->mapping;
546         struct gfs2_inode *ip = GFS2_I(mapping->host);
547         struct gfs2_holder gh;
548         int error;
549 
550         unlock_page(page);
551         gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
552         error = gfs2_glock_nq(&gh);
553         if (unlikely(error))
554                 goto out;
555         error = AOP_TRUNCATED_PAGE;
556         lock_page(page);
557         if (page->mapping == mapping && !PageUptodate(page))
558                 error = __gfs2_readpage(file, page);
559         else
560                 unlock_page(page);
561         gfs2_glock_dq(&gh);
562 out:
563         gfs2_holder_uninit(&gh);
564         if (error && error != AOP_TRUNCATED_PAGE)
565                 lock_page(page);
566         return error;
567 }
568 
569 /**
570  * gfs2_internal_read - read an internal file
571  * @ip: The gfs2 inode
572  * @buf: The buffer to fill
573  * @pos: The file position
574  * @size: The amount to read
575  *
576  */
577 
578 int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
579                        unsigned size)
580 {
581         struct address_space *mapping = ip->i_inode.i_mapping;
582         unsigned long index = *pos / PAGE_SIZE;
583         unsigned offset = *pos & (PAGE_SIZE - 1);
584         unsigned copied = 0;
585         unsigned amt;
586         struct page *page;
587         void *p;
588 
589         do {
590                 amt = size - copied;
591                 if (offset + size > PAGE_SIZE)
592                         amt = PAGE_SIZE - offset;
593                 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
594                 if (IS_ERR(page))
595                         return PTR_ERR(page);
596                 p = kmap_atomic(page);
597                 memcpy(buf + copied, p + offset, amt);
598                 kunmap_atomic(p);
599                 put_page(page);
600                 copied += amt;
601                 index++;
602                 offset = 0;
603         } while(copied < size);
604         (*pos) += size;
605         return size;
606 }
607 
608 /**
609  * gfs2_readpages - Read a bunch of pages at once
610  * @file: The file to read from
611  * @mapping: Address space info
612  * @pages: List of pages to read
613  * @nr_pages: Number of pages to read
614  *
615  * Some notes:
 616  * 1. This is only for readahead, so we can simply ignore anything
 617  *    which is slightly inconvenient (such as locking conflicts between
 618  *    the page lock and the glock) and return having done no I/O. It's
 619  *    obviously not something we'd want to do on too regular a basis.
 620  *    Any I/O we ignore at this time will be done via readpage later.
 621  * 2. We don't handle stuffed files here; we let readpage do the honours.
622  * 3. mpage_readpages() does most of the heavy lifting in the common case.
623  * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
624  */
625 
626 static int gfs2_readpages(struct file *file, struct address_space *mapping,
627                           struct list_head *pages, unsigned nr_pages)
628 {
629         struct inode *inode = mapping->host;
630         struct gfs2_inode *ip = GFS2_I(inode);
631         struct gfs2_sbd *sdp = GFS2_SB(inode);
632         struct gfs2_holder gh;
633         int ret;
634 
635         gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
636         ret = gfs2_glock_nq(&gh);
637         if (unlikely(ret))
638                 goto out_uninit;
639         if (!gfs2_is_stuffed(ip))
640                 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
641         gfs2_glock_dq(&gh);
642 out_uninit:
643         gfs2_holder_uninit(&gh);
644         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
645                 ret = -EIO;
646         return ret;
647 }
648 
649 /**
650  * gfs2_write_begin - Begin to write to a file
651  * @file: The file to write to
652  * @mapping: The mapping in which to write
653  * @pos: The file offset at which to start writing
654  * @len: Length of the write
655  * @flags: Various flags
656  * @pagep: Pointer to return the page
657  * @fsdata: Pointer to return fs data (unused by GFS2)
658  *
659  * Returns: errno
660  */
661 
662 static int gfs2_write_begin(struct file *file, struct address_space *mapping,
663                             loff_t pos, unsigned len, unsigned flags,
664                             struct page **pagep, void **fsdata)
665 {
666         struct gfs2_inode *ip = GFS2_I(mapping->host);
667         struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
668         struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
669         unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
670         unsigned requested = 0;
671         int alloc_required;
672         int error = 0;
673         pgoff_t index = pos >> PAGE_SHIFT;
674         unsigned from = pos & (PAGE_SIZE - 1);
675         struct page *page;
676 
677         gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
678         error = gfs2_glock_nq(&ip->i_gh);
679         if (unlikely(error))
680                 goto out_uninit;
681         if (&ip->i_inode == sdp->sd_rindex) {
682                 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
683                                            GL_NOCACHE, &m_ip->i_gh);
684                 if (unlikely(error)) {
685                         gfs2_glock_dq(&ip->i_gh);
686                         goto out_uninit;
687                 }
688         }
689 
690         alloc_required = gfs2_write_alloc_required(ip, pos, len);
691 
692         if (alloc_required || gfs2_is_jdata(ip))
693                 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
694 
695         if (alloc_required) {
696                 struct gfs2_alloc_parms ap = { .aflags = 0, };
697                 requested = data_blocks + ind_blocks;
698                 ap.target = requested;
699                 error = gfs2_quota_lock_check(ip, &ap);
700                 if (error)
701                         goto out_unlock;
702 
703                 error = gfs2_inplace_reserve(ip, &ap);
704                 if (error)
705                         goto out_qunlock;
706         }
707 
708         rblocks = RES_DINODE + ind_blocks;
709         if (gfs2_is_jdata(ip))
710                 rblocks += data_blocks ? data_blocks : 1;
711         if (ind_blocks || data_blocks)
712                 rblocks += RES_STATFS + RES_QUOTA;
713         if (&ip->i_inode == sdp->sd_rindex)
714                 rblocks += 2 * RES_STATFS;
715         if (alloc_required)
716                 rblocks += gfs2_rg_blocks(ip, requested);
717 
718         error = gfs2_trans_begin(sdp, rblocks,
719                                  PAGE_SIZE/sdp->sd_sb.sb_bsize);
720         if (error)
721                 goto out_trans_fail;
722 
723         error = -ENOMEM;
724         flags |= AOP_FLAG_NOFS;
725         page = grab_cache_page_write_begin(mapping, index, flags);
726         *pagep = page;
727         if (unlikely(!page))
728                 goto out_endtrans;
729 
730         if (gfs2_is_stuffed(ip)) {
731                 error = 0;
732                 if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
733                         error = gfs2_unstuff_dinode(ip, page);
734                         if (error == 0)
735                                 goto prepare_write;
736                 } else if (!PageUptodate(page)) {
737                         error = stuffed_readpage(ip, page);
738                 }
739                 goto out;
740         }
741 
742 prepare_write:
743         error = __block_write_begin(page, from, len, gfs2_block_map);
744 out:
745         if (error == 0)
746                 return 0;
747 
748         unlock_page(page);
749         put_page(page);
750 
751         gfs2_trans_end(sdp);
752         if (pos + len > ip->i_inode.i_size)
753                 gfs2_trim_blocks(&ip->i_inode);
754         goto out_trans_fail;
755 
756 out_endtrans:
757         gfs2_trans_end(sdp);
758 out_trans_fail:
759         if (alloc_required) {
760                 gfs2_inplace_release(ip);
761 out_qunlock:
762                 gfs2_quota_unlock(ip);
763         }
764 out_unlock:
765         if (&ip->i_inode == sdp->sd_rindex) {
766                 gfs2_glock_dq(&m_ip->i_gh);
767                 gfs2_holder_uninit(&m_ip->i_gh);
768         }
769         gfs2_glock_dq(&ip->i_gh);
770 out_uninit:
771         gfs2_holder_uninit(&ip->i_gh);
772         return error;
773 }
774 
775 /**
776  * adjust_fs_space - Adjusts the free space available due to gfs2_grow
777  * @inode: the rindex inode
778  */
779 static void adjust_fs_space(struct inode *inode)
780 {
781         struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
782         struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
783         struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
784         struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
785         struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
786         struct buffer_head *m_bh, *l_bh;
787         u64 fs_total, new_free;
788 
789         /* Total up the file system space, according to the latest rindex. */
790         fs_total = gfs2_ri_total(sdp);
791         if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
792                 return;
793 
794         spin_lock(&sdp->sd_statfs_spin);
795         gfs2_statfs_change_in(m_sc, m_bh->b_data +
796                               sizeof(struct gfs2_dinode));
797         if (fs_total > (m_sc->sc_total + l_sc->sc_total))
798                 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
799         else
800                 new_free = 0;
801         spin_unlock(&sdp->sd_statfs_spin);
802         fs_warn(sdp, "File system extended by %llu blocks.\n",
803                 (unsigned long long)new_free);
804         gfs2_statfs_change(sdp, new_free, new_free, 0);
805 
806         if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
807                 goto out;
808         update_statfs(sdp, m_bh, l_bh);
809         brelse(l_bh);
810 out:
811         brelse(m_bh);
812 }
813 
814 /**
815  * gfs2_stuffed_write_end - Write end for stuffed files
816  * @inode: The inode
817  * @dibh: The buffer_head containing the on-disk inode
818  * @pos: The file position
819  * @len: The length of the write
820  * @copied: How much was actually copied by the VFS
821  * @page: The page
822  *
823  * This copies the data from the page into the inode block after
824  * the inode data structure itself.
825  *
826  * Returns: errno
827  */
828 static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
829                                   loff_t pos, unsigned len, unsigned copied,
830                                   struct page *page)
831 {
832         struct gfs2_inode *ip = GFS2_I(inode);
833         struct gfs2_sbd *sdp = GFS2_SB(inode);
834         struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
835         u64 to = pos + copied;
836         void *kaddr;
837         unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
838 
839         BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
840         kaddr = kmap_atomic(page);
841         memcpy(buf + pos, kaddr + pos, copied);
842         flush_dcache_page(page);
843         kunmap_atomic(kaddr);
844 
845         WARN_ON(!PageUptodate(page));
846         unlock_page(page);
847         put_page(page);
848 
849         if (copied) {
850                 if (inode->i_size < to)
851                         i_size_write(inode, to);
852                 mark_inode_dirty(inode);
853         }
854 
855         if (inode == sdp->sd_rindex) {
856                 adjust_fs_space(inode);
857                 sdp->sd_rindex_uptodate = 0;
858         }
859 
860         brelse(dibh);
861         gfs2_trans_end(sdp);
862         if (inode == sdp->sd_rindex) {
863                 gfs2_glock_dq(&m_ip->i_gh);
864                 gfs2_holder_uninit(&m_ip->i_gh);
865         }
866         gfs2_glock_dq(&ip->i_gh);
867         gfs2_holder_uninit(&ip->i_gh);
868         return copied;
869 }
870 
871 /**
872  * gfs2_write_end
873  * @file: The file to write to
874  * @mapping: The address space to write to
875  * @pos: The file position
876  * @len: The length of the data
877  * @copied: How much was actually copied by the VFS
878  * @page: The page that has been written
879  * @fsdata: The fsdata (unused in GFS2)
880  *
881  * The main write_end function for GFS2. We have a separate one for
 882  * stuffed files, as they are slightly different; otherwise we just
 883  * put our locking around the VFS-provided functions.
884  *
885  * Returns: errno
886  */
887 
888 static int gfs2_write_end(struct file *file, struct address_space *mapping,
889                           loff_t pos, unsigned len, unsigned copied,
890                           struct page *page, void *fsdata)
891 {
892         struct inode *inode = page->mapping->host;
893         struct gfs2_inode *ip = GFS2_I(inode);
894         struct gfs2_sbd *sdp = GFS2_SB(inode);
895         struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
896         struct buffer_head *dibh;
897         unsigned int from = pos & (PAGE_SIZE - 1);
898         unsigned int to = from + len;
899         int ret;
900         struct gfs2_trans *tr = current->journal_info;
901         BUG_ON(!tr);
902 
903         BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
904 
905         ret = gfs2_meta_inode_buffer(ip, &dibh);
906         if (unlikely(ret)) {
907                 unlock_page(page);
908                 put_page(page);
909                 goto failed;
910         }
911 
912         if (gfs2_is_stuffed(ip))
913                 return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
914 
915         if (!gfs2_is_writeback(ip))
916                 gfs2_page_add_databufs(ip, page, from, to);
917 
918         ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
919         if (tr->tr_num_buf_new)
920                 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
921         else
922                 gfs2_trans_add_meta(ip->i_gl, dibh);
923 
924 
925         if (inode == sdp->sd_rindex) {
926                 adjust_fs_space(inode);
927                 sdp->sd_rindex_uptodate = 0;
928         }
929 
930         brelse(dibh);
931 failed:
932         gfs2_trans_end(sdp);
933         gfs2_inplace_release(ip);
934         if (ip->i_qadata && ip->i_qadata->qa_qd_num)
935                 gfs2_quota_unlock(ip);
936         if (inode == sdp->sd_rindex) {
937                 gfs2_glock_dq(&m_ip->i_gh);
938                 gfs2_holder_uninit(&m_ip->i_gh);
939         }
940         gfs2_glock_dq(&ip->i_gh);
941         gfs2_holder_uninit(&ip->i_gh);
942         return ret;
943 }
944 
945 /**
946  * gfs2_set_page_dirty - Page dirtying function
947  * @page: The page to dirty
948  *
 949  * Returns: 1 if it dirtied the page, or 0 otherwise
950  */
951  
952 static int gfs2_set_page_dirty(struct page *page)
953 {
954         SetPageChecked(page);
955         return __set_page_dirty_buffers(page);
956 }
957 
958 /**
959  * gfs2_bmap - Block map function
960  * @mapping: Address space info
961  * @lblock: The block to map
962  *
963  * Returns: The disk address for the block or 0 on hole or error
964  */
965 
966 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
967 {
968         struct gfs2_inode *ip = GFS2_I(mapping->host);
969         struct gfs2_holder i_gh;
970         sector_t dblock = 0;
971         int error;
972 
973         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
974         if (error)
975                 return 0;
976 
977         if (!gfs2_is_stuffed(ip))
978                 dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);
979 
980         gfs2_glock_dq_uninit(&i_gh);
981 
982         return dblock;
983 }
984 
985 static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
986 {
987         struct gfs2_bufdata *bd;
988 
989         lock_buffer(bh);
990         gfs2_log_lock(sdp);
991         clear_buffer_dirty(bh);
992         bd = bh->b_private;
993         if (bd) {
994                 if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
995                         list_del_init(&bd->bd_list);
996                 else
997                         gfs2_remove_from_journal(bh, REMOVE_JDATA);
998         }
999         bh->b_bdev = NULL;
1000         clear_buffer_mapped(bh);
1001         clear_buffer_req(bh);
1002         clear_buffer_new(bh);
1003         gfs2_log_unlock(sdp);
1004         unlock_buffer(bh);
1005 }
1006 
1007 static void gfs2_invalidatepage(struct page *page, unsigned int offset,
1008                                 unsigned int length)
1009 {
1010         struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
1011         unsigned int stop = offset + length;
1012         int partial_page = (offset || length < PAGE_SIZE);
1013         struct buffer_head *bh, *head;
1014         unsigned long pos = 0;
1015 
1016         BUG_ON(!PageLocked(page));
1017         if (!partial_page)
1018                 ClearPageChecked(page);
1019         if (!page_has_buffers(page))
1020                 goto out;
1021 
1022         bh = head = page_buffers(page);
1023         do {
1024                 if (pos + bh->b_size > stop)
1025                         return;
1026 
1027                 if (offset <= pos)
1028                         gfs2_discard(sdp, bh);
1029                 pos += bh->b_size;
1030                 bh = bh->b_this_page;
1031         } while (bh != head);
1032 out:
1033         if (!partial_page)
1034                 try_to_release_page(page, 0);
1035 }
1036 
1037 /**
1038  * gfs2_ok_for_dio - check that dio is valid on this file
1039  * @ip: The inode
1040  * @offset: The offset at which we are reading or writing
1041  *
1042  * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
1043  *          1 (to accept the i/o request)
1044  */
1045 static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
1046 {
1047         /*
1048          * Should we return an error here? I can't see that O_DIRECT for
1049          * a stuffed file makes any sense. For now we'll silently fall
1050          * back to buffered I/O
1051          */
1052         if (gfs2_is_stuffed(ip))
1053                 return 0;
1054 
1055         if (offset >= i_size_read(&ip->i_inode))
1056                 return 0;
1057         return 1;
1058 }
1059 
1060 
1061 
1062 static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1063 {
1064         struct file *file = iocb->ki_filp;
1065         struct inode *inode = file->f_mapping->host;
1066         struct address_space *mapping = inode->i_mapping;
1067         struct gfs2_inode *ip = GFS2_I(inode);
1068         loff_t offset = iocb->ki_pos;
1069         struct gfs2_holder gh;
1070         int rv;
1071 
1072         /*
1073          * Deferred lock, even if it's a write, since we do no allocation
1074          * on this path. All we need to change is atime, and this lock mode
1075          * ensures that other nodes have flushed their buffered read caches
1076          * (i.e. their page cache entries for this inode). We do not,
1077          * unfortunately have the option of only flushing a range like
1078          * the VFS does.
1079          */
1080         gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
1081         rv = gfs2_glock_nq(&gh);
1082         if (rv)
1083                 goto out_uninit;
1084         rv = gfs2_ok_for_dio(ip, offset);
1085         if (rv != 1)
1086                 goto out; /* dio not valid, fall back to buffered i/o */
1087 
1088         /*
1089          * Now since we are holding a deferred (CW) lock at this point, you
1090          * might be wondering why this is ever needed. There is a case however
1091          * where we've granted a deferred local lock against a cached exclusive
1092          * glock. That is ok provided all granted local locks are deferred, but
1093          * it also means that it is possible to encounter pages which are
1094          * cached and possibly also mapped. So here we check for that and sort
1095          * them out ahead of the dio. The glock state machine will take care of
1096          * everything else.
1097          *
1098          * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
1099          * the first place, mapping->nrpages will always be zero.
1100          */
1101         if (mapping->nrpages) {
1102                 loff_t lstart = offset & ~(PAGE_SIZE - 1);
1103                 loff_t len = iov_iter_count(iter);
1104                 loff_t end = PAGE_ALIGN(offset + len) - 1;
1105 
1106                 rv = 0;
1107                 if (len == 0)
1108                         goto out;
1109                 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
1110                         unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
1111                 rv = filemap_write_and_wait_range(mapping, lstart, end);
1112                 if (rv)
1113                         goto out;
1114                 if (iov_iter_rw(iter) == WRITE)
1115                         truncate_inode_pages_range(mapping, lstart, end);
1116         }
1117 
1118         rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
1119                                   gfs2_get_block_direct, NULL, NULL, 0);
1120 out:
1121         gfs2_glock_dq(&gh);
1122 out_uninit:
1123         gfs2_holder_uninit(&gh);
1124         return rv;
1125 }
1126 
1127 /**
1128  * gfs2_releasepage - free the metadata associated with a page
1129  * @page: the page that's being released
1130  * @gfp_mask: passed from Linux VFS, ignored by us
1131  *
1132  * Call try_to_free_buffers() if the buffers in this page can be
1133  * released.
1134  *
1135  * Returns: 0
1136  */
1137 
1138 int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
1139 {
1140         struct address_space *mapping = page->mapping;
1141         struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
1142         struct buffer_head *bh, *head;
1143         struct gfs2_bufdata *bd;
1144 
1145         if (!page_has_buffers(page))
1146                 return 0;
1147 
1148         /*
1149          * From xfs_vm_releasepage: mm accommodates an old ext3 case where
1150          * clean pages might not have had the dirty bit cleared.  Thus, it can
1151          * send actual dirty pages to ->releasepage() via shrink_active_list().
1152          *
1153          * As a workaround, we skip pages that contain dirty buffers below.
1154          * Once ->releasepage isn't called on dirty pages anymore, we can warn
1155          * on dirty buffers like we used to here again.
1156          */
1157 
1158         gfs2_log_lock(sdp);
1159         spin_lock(&sdp->sd_ail_lock);
1160         head = bh = page_buffers(page);
1161         do {
1162                 if (atomic_read(&bh->b_count))
1163                         goto cannot_release;
1164                 bd = bh->b_private;
1165                 if (bd && bd->bd_tr)
1166                         goto cannot_release;
1167                 if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
1168                         goto cannot_release;
1169                 bh = bh->b_this_page;
1170         } while(bh != head);
1171         spin_unlock(&sdp->sd_ail_lock);
1172 
1173         head = bh = page_buffers(page);
1174         do {
1175                 bd = bh->b_private;
1176                 if (bd) {
1177                         gfs2_assert_warn(sdp, bd->bd_bh == bh);
1178                         if (!list_empty(&bd->bd_list))
1179                                 list_del_init(&bd->bd_list);
1180                         bd->bd_bh = NULL;
1181                         bh->b_private = NULL;
1182                         kmem_cache_free(gfs2_bufdata_cachep, bd);
1183                 }
1184 
1185                 bh = bh->b_this_page;
1186         } while (bh != head);
1187         gfs2_log_unlock(sdp);
1188 
1189         return try_to_free_buffers(page);
1190 
1191 cannot_release:
1192         spin_unlock(&sdp->sd_ail_lock);
1193         gfs2_log_unlock(sdp);
1194         return 0;
1195 }
1196 
1197 static const struct address_space_operations gfs2_writeback_aops = {
1198         .writepage = gfs2_writepage,
1199         .writepages = gfs2_writepages,
1200         .readpage = gfs2_readpage,
1201         .readpages = gfs2_readpages,
1202         .write_begin = gfs2_write_begin,
1203         .write_end = gfs2_write_end,
1204         .bmap = gfs2_bmap,
1205         .invalidatepage = gfs2_invalidatepage,
1206         .releasepage = gfs2_releasepage,
1207         .direct_IO = gfs2_direct_IO,
1208         .migratepage = buffer_migrate_page,
1209         .is_partially_uptodate = block_is_partially_uptodate,
1210         .error_remove_page = generic_error_remove_page,
1211 };
1212 
1213 static const struct address_space_operations gfs2_ordered_aops = {
1214         .writepage = gfs2_writepage,
1215         .writepages = gfs2_writepages,
1216         .readpage = gfs2_readpage,
1217         .readpages = gfs2_readpages,
1218         .write_begin = gfs2_write_begin,
1219         .write_end = gfs2_write_end,
1220         .set_page_dirty = gfs2_set_page_dirty,
1221         .bmap = gfs2_bmap,
1222         .invalidatepage = gfs2_invalidatepage,
1223         .releasepage = gfs2_releasepage,
1224         .direct_IO = gfs2_direct_IO,
1225         .migratepage = buffer_migrate_page,
1226         .is_partially_uptodate = block_is_partially_uptodate,
1227         .error_remove_page = generic_error_remove_page,
1228 };
1229 
1230 static const struct address_space_operations gfs2_jdata_aops = {
1231         .writepage = gfs2_jdata_writepage,
1232         .writepages = gfs2_jdata_writepages,
1233         .readpage = gfs2_readpage,
1234         .readpages = gfs2_readpages,
1235         .write_begin = gfs2_write_begin,
1236         .write_end = gfs2_write_end,
1237         .set_page_dirty = gfs2_set_page_dirty,
1238         .bmap = gfs2_bmap,
1239         .invalidatepage = gfs2_invalidatepage,
1240         .releasepage = gfs2_releasepage,
1241         .is_partially_uptodate = block_is_partially_uptodate,
1242         .error_remove_page = generic_error_remove_page,
1243 };
1244 
1245 void gfs2_set_aops(struct inode *inode)
1246 {
1247         struct gfs2_inode *ip = GFS2_I(inode);
1248 
1249         if (gfs2_is_writeback(ip))
1250                 inode->i_mapping->a_ops = &gfs2_writeback_aops;
1251         else if (gfs2_is_ordered(ip))
1252                 inode->i_mapping->a_ops = &gfs2_ordered_aops;
1253         else if (gfs2_is_jdata(ip))
1254                 inode->i_mapping->a_ops = &gfs2_jdata_aops;
1255         else
1256                 BUG();
1257 }
1258 
1259 
