TOMOYO Linux Cross Reference
Linux/fs/ntfs/compress.c

  1 /**
  2  * compress.c - NTFS kernel compressed attributes handling.
  3  *              Part of the Linux-NTFS project.
  4  *
  5  * Copyright (c) 2001-2004 Anton Altaparmakov
  6  * Copyright (c) 2002 Richard Russon
  7  *
  8  * This program/include file is free software; you can redistribute it and/or
  9  * modify it under the terms of the GNU General Public License as published
 10  * by the Free Software Foundation; either version 2 of the License, or
 11  * (at your option) any later version.
 12  *
 13  * This program/include file is distributed in the hope that it will be
 14  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16  * GNU General Public License for more details.
 17  *
 18  * You should have received a copy of the GNU General Public License
 19  * along with this program (in the main directory of the Linux-NTFS
 20  * distribution in the file COPYING); if not, write to the Free Software
  21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 22  */
 23 
 24 #include <linux/fs.h>
 25 #include <linux/buffer_head.h>
 26 #include <linux/blkdev.h>
 27 #include <linux/vmalloc.h>
 28 #include <linux/slab.h>
 29 
 30 #include "attrib.h"
 31 #include "inode.h"
 32 #include "debug.h"
 33 #include "ntfs.h"
 34 
 35 /**
 36  * ntfs_compression_constants - enum of constants used in the compression code
 37  */
 38 typedef enum {
 39         /* Token types and access mask. */
 40         NTFS_SYMBOL_TOKEN       =       0,
 41         NTFS_PHRASE_TOKEN       =       1,
 42         NTFS_TOKEN_MASK         =       1,
 43 
 44         /* Compression sub-block constants. */
 45         NTFS_SB_SIZE_MASK       =       0x0fff,
 46         NTFS_SB_SIZE            =       0x1000,
 47         NTFS_SB_IS_COMPRESSED   =       0x8000,
 48 
 49         /*
 50          * The maximum compression block size is by definition 16 * the cluster
 51          * size, with the maximum supported cluster size being 4kiB. Thus the
 52          * maximum compression buffer size is 64kiB, so we use this when
 53          * initializing the compression buffer.
 54          */
 55         NTFS_MAX_CB_SIZE        = 64 * 1024,
 56 } ntfs_compression_constants;
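
    /*
     * For reference, the 2-byte little-endian header that begins every
     * sub-block (sb) is decoded below as follows: the low 12 bits
     * (NTFS_SB_SIZE_MASK) hold the length in bytes of the data following
     * the header, minus one; bit 15 (NTFS_SB_IS_COMPRESSED) is set if that
     * data is compressed; bits 12-14 are ignored.  Thus a full-size
     * uncompressed sb has (header & 0x8fff) == 0x0fff, while a header of
     * zero terminates the compression block.
     */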
 57 
 58 /**
 59  * ntfs_compression_buffer - one buffer for the decompression engine
 60  */
 61 static u8 *ntfs_compression_buffer;
 62 
 63 /**
 64  * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
 65  */
 66 static DEFINE_SPINLOCK(ntfs_cb_lock);
 67 
 68 /**
 69  * allocate_compression_buffers - allocate the decompression buffers
 70  *
 71  * Caller has to hold the ntfs_lock mutex.
 72  *
 73  * Return 0 on success or -ENOMEM if the allocations failed.
 74  */
 75 int allocate_compression_buffers(void)
 76 {
 77         BUG_ON(ntfs_compression_buffer);
 78 
 79         ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
 80         if (!ntfs_compression_buffer)
 81                 return -ENOMEM;
 82         return 0;
 83 }
 84 
 85 /**
 86  * free_compression_buffers - free the decompression buffers
 87  *
 88  * Caller has to hold the ntfs_lock mutex.
 89  */
 90 void free_compression_buffers(void)
 91 {
 92         BUG_ON(!ntfs_compression_buffer);
 93         vfree(ntfs_compression_buffer);
 94         ntfs_compression_buffer = NULL;
 95 }
 96 
 97 /**
  98  * zero_partial_compressed_page - zero out-of-bounds compressed page region
 99  */
100 static void zero_partial_compressed_page(struct page *page,
101                 const s64 initialized_size)
102 {
103         u8 *kp = page_address(page);
104         unsigned int kp_ofs;
105 
106         ntfs_debug("Zeroing page region outside initialized size.");
107         if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
108                 clear_page(kp);
109                 return;
110         }
111         kp_ofs = initialized_size & ~PAGE_MASK;
112         memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
113         return;
114 }
115 
116 /**
 117  * handle_bounds_compressed_page - test for & handle out-of-bounds compressed page
118  */
119 static inline void handle_bounds_compressed_page(struct page *page,
120                 const loff_t i_size, const s64 initialized_size)
121 {
122         if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
123                         (initialized_size < i_size))
124                 zero_partial_compressed_page(page, initialized_size);
125         return;
126 }
127 
128 /**
129  * ntfs_decompress - decompress a compression block into an array of pages
130  * @dest_pages:         destination array of pages
131  * @dest_index:         current index into @dest_pages (IN/OUT)
132  * @dest_ofs:           current offset within @dest_pages[@dest_index] (IN/OUT)
133  * @dest_max_index:     maximum index into @dest_pages (IN)
134  * @dest_max_ofs:       maximum offset within @dest_pages[@dest_max_index] (IN)
135  * @xpage:              the target page (-1 if none) (IN)
136  * @xpage_done:         set to 1 if xpage was completed successfully (IN/OUT)
137  * @cb_start:           compression block to decompress (IN)
138  * @cb_size:            size of compression block @cb_start in bytes (IN)
139  * @i_size:             file size when we started the read (IN)
140  * @initialized_size:   initialized file size when we started the read (IN)
141  *
 142  * The caller must hold the ntfs_cb_lock spinlock (so preemption is disabled).
 143  * ntfs_decompress() drops the lock when its critical section is finished.
144  *
145  * This decompresses the compression block @cb_start into the array of
146  * destination pages @dest_pages starting at index @dest_index into @dest_pages
 147  * and at offset @dest_ofs into the page @dest_pages[@dest_index].
148  *
149  * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
 150  * If @xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
151  *
152  * @cb_start is a pointer to the compression block which needs decompressing
153  * and @cb_size is the size of @cb_start in bytes (8-64kiB).
154  *
155  * Return 0 if success or -EOVERFLOW on error in the compressed stream.
156  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
157  * completed during the decompression of the compression block (@cb_start).
158  *
159  * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
 160  * unpredictably! You have been warned!
161  *
 162  * Note to hackers: This function may not sleep until it has finished accessing
 163  * @cb_start, as the buffer holding it is protected by the ntfs_cb_lock spinlock.
164  */
165 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
166                 int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
167                 const int xpage, char *xpage_done, u8 *const cb_start,
168                 const u32 cb_size, const loff_t i_size,
169                 const s64 initialized_size)
170 {
171         /*
172          * Pointers into the compressed data, i.e. the compression block (cb),
173          * and the therein contained sub-blocks (sb).
174          */
175         u8 *cb_end = cb_start + cb_size; /* End of cb. */
176         u8 *cb = cb_start;      /* Current position in cb. */
177         u8 *cb_sb_start = cb;   /* Beginning of the current sb in the cb. */
178         u8 *cb_sb_end;          /* End of current sb / beginning of next sb. */
179 
180         /* Variables for uncompressed data / destination. */
181         struct page *dp;        /* Current destination page being worked on. */
182         u8 *dp_addr;            /* Current pointer into dp. */
183         u8 *dp_sb_start;        /* Start of current sub-block in dp. */
184         u8 *dp_sb_end;          /* End of current sb in dp (dp_sb_start +
185                                    NTFS_SB_SIZE). */
186         u16 do_sb_start;        /* @dest_ofs when starting this sub-block. */
187         u16 do_sb_end;          /* @dest_ofs of end of this sb (do_sb_start +
188                                    NTFS_SB_SIZE). */
189 
190         /* Variables for tag and token parsing. */
191         u8 tag;                 /* Current tag. */
192         int token;              /* Loop counter for the eight tokens in tag. */
193 
194         /* Need this because we can't sleep, so need two stages. */
195         int completed_pages[dest_max_index - *dest_index + 1];
196         int nr_completed_pages = 0;
197 
198         /* Default error code. */
199         int err = -EOVERFLOW;
200 
201         ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
202 do_next_sb:
203         ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
204                         cb - cb_start);
205         /*
206          * Have we reached the end of the compression block or the end of the
207          * decompressed data?  The latter can happen for example if the current
208          * position in the compression block is one byte before its end so the
209          * first two checks do not detect it.
210          */
211         if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
212                         (*dest_index == dest_max_index &&
213                         *dest_ofs == dest_max_ofs)) {
214                 int i;
215 
216                 ntfs_debug("Completed. Returning success (0).");
217                 err = 0;
218 return_error:
219                 /* We can sleep from now on, so we drop lock. */
220                 spin_unlock(&ntfs_cb_lock);
221                 /* Second stage: finalize completed pages. */
222                 if (nr_completed_pages > 0) {
223                         for (i = 0; i < nr_completed_pages; i++) {
224                                 int di = completed_pages[i];
225 
226                                 dp = dest_pages[di];
227                                 /*
228                                  * If we are outside the initialized size, zero
229                                  * the out of bounds page range.
230                                  */
231                                 handle_bounds_compressed_page(dp, i_size,
232                                                 initialized_size);
233                                 flush_dcache_page(dp);
234                                 kunmap(dp);
235                                 SetPageUptodate(dp);
236                                 unlock_page(dp);
237                                 if (di == xpage)
238                                         *xpage_done = 1;
239                                 else
240                                         put_page(dp);
241                                 dest_pages[di] = NULL;
242                         }
243                 }
244                 return err;
245         }
246 
247         /* Setup offsets for the current sub-block destination. */
248         do_sb_start = *dest_ofs;
249         do_sb_end = do_sb_start + NTFS_SB_SIZE;
250 
251         /* Check that we are still within allowed boundaries. */
252         if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
253                 goto return_overflow;
254 
255         /* Does the minimum size of a compressed sb overflow valid range? */
256         if (cb + 6 > cb_end)
257                 goto return_overflow;
258 
259         /* Setup the current sub-block source pointers and validate range. */
260         cb_sb_start = cb;
261         cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
262                         + 3;
263         if (cb_sb_end > cb_end)
264                 goto return_overflow;
265 
266         /* Get the current destination page. */
267         dp = dest_pages[*dest_index];
268         if (!dp) {
269                 /* No page present. Skip decompression of this sub-block. */
270                 cb = cb_sb_end;
271 
272                 /* Advance destination position to next sub-block. */
273                 *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
274                 if (!*dest_ofs && (++*dest_index > dest_max_index))
275                         goto return_overflow;
276                 goto do_next_sb;
277         }
278 
279         /* We have a valid destination page. Setup the destination pointers. */
280         dp_addr = (u8*)page_address(dp) + do_sb_start;
281 
282         /* Now, we are ready to process the current sub-block (sb). */
283         if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
284                 ntfs_debug("Found uncompressed sub-block.");
285                 /* This sb is not compressed, just copy it into destination. */
286 
287                 /* Advance source position to first data byte. */
288                 cb += 2;
289 
290                 /* An uncompressed sb must be full size. */
291                 if (cb_sb_end - cb != NTFS_SB_SIZE)
292                         goto return_overflow;
293 
294                 /* Copy the block and advance the source position. */
295                 memcpy(dp_addr, cb, NTFS_SB_SIZE);
296                 cb += NTFS_SB_SIZE;
297 
298                 /* Advance destination position to next sub-block. */
299                 *dest_ofs += NTFS_SB_SIZE;
300                 if (!(*dest_ofs &= ~PAGE_MASK)) {
301 finalize_page:
302                         /*
303                          * First stage: add current page index to array of
304                          * completed pages.
305                          */
306                         completed_pages[nr_completed_pages++] = *dest_index;
307                         if (++*dest_index > dest_max_index)
308                                 goto return_overflow;
309                 }
310                 goto do_next_sb;
311         }
312         ntfs_debug("Found compressed sub-block.");
313         /* This sb is compressed, decompress it into destination. */
314 
315         /* Setup destination pointers. */
316         dp_sb_start = dp_addr;
317         dp_sb_end = dp_sb_start + NTFS_SB_SIZE;
318 
319         /* Forward to the first tag in the sub-block. */
320         cb += 2;
321 do_next_tag:
322         if (cb == cb_sb_end) {
323                 /* Check if the decompressed sub-block was not full-length. */
324                 if (dp_addr < dp_sb_end) {
325                         int nr_bytes = do_sb_end - *dest_ofs;
326 
327                         ntfs_debug("Filling incomplete sub-block with "
328                                         "zeroes.");
329                         /* Zero remainder and update destination position. */
330                         memset(dp_addr, 0, nr_bytes);
331                         *dest_ofs += nr_bytes;
332                 }
333                 /* We have finished the current sub-block. */
334                 if (!(*dest_ofs &= ~PAGE_MASK))
335                         goto finalize_page;
336                 goto do_next_sb;
337         }
338 
339         /* Check we are still in range. */
340         if (cb > cb_sb_end || dp_addr > dp_sb_end)
341                 goto return_overflow;
342 
343         /* Get the next tag and advance to first token. */
344         tag = *cb++;
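            /*
             * Each of the tag's eight bits describes one token, least
             * significant bit first: 0 = literal symbol byte, 1 = two-byte
             * phrase token.  A tag of 0x05, for example, marks tokens 0 and
             * 2 as phrase tokens and the other six as symbols.
             */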
345 
346         /* Parse the eight tokens described by the tag. */
347         for (token = 0; token < 8; token++, tag >>= 1) {
348                 u16 lg, pt, length, max_non_overlap;
349                 register u16 i;
350                 u8 *dp_back_addr;
351 
352                 /* Check if we are done / still in range. */
353                 if (cb >= cb_sb_end || dp_addr > dp_sb_end)
354                         break;
355 
 356                 /* Determine token type and parse appropriately. */
357                 if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
358                         /*
359                          * We have a symbol token, copy the symbol across, and
360                          * advance the source and destination positions.
361                          */
362                         *dp_addr++ = *cb++;
363                         ++*dest_ofs;
364 
365                         /* Continue with the next token. */
366                         continue;
367                 }
368 
369                 /*
370                  * We have a phrase token. Make sure it is not the first tag in
371                  * the sb as this is illegal and would confuse the code below.
372                  */
373                 if (dp_addr == dp_sb_start)
374                         goto return_overflow;
375 
376                 /*
377                  * Determine the number of bytes to go back (p) and the number
378                  * of bytes to copy (l). We use an optimized algorithm in which
379                  * we first calculate log2(current destination position in sb),
380                  * which allows determination of l and p in O(1) rather than
381                  * O(n). We just need an arch-optimized log2() function now.
382                  */
383                 lg = 0;
384                 for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
385                         lg++;
386 
 388                 /* Get the phrase token into pt. */
388                 pt = le16_to_cpup((le16*)cb);
389 
390                 /*
391                  * Calculate starting position of the byte sequence in
392                  * the destination using the fact that p = (pt >> (12 - lg)) + 1
393                  * and make sure we don't go too far back.
394                  */
395                 dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
396                 if (dp_back_addr < dp_sb_start)
397                         goto return_overflow;
398 
399                 /* Now calculate the length of the byte sequence. */
400                 length = (pt & (0xfff >> lg)) + 3;
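                    /*
                     * Worked example: 0x100 bytes into the sub-block, i
                     * starts at 0xff so lg ends up as 4, giving
                     * p = (pt >> 8) + 1 and length = (pt & 0xff) + 3; the
                     * token 0x2007 thus means "go back 33 bytes and copy 10
                     * bytes from there".
                     */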
401 
402                 /* Advance destination position and verify it is in range. */
403                 *dest_ofs += length;
404                 if (*dest_ofs > do_sb_end)
405                         goto return_overflow;
406 
407                 /* The number of non-overlapping bytes. */
408                 max_non_overlap = dp_addr - dp_back_addr;
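                    /*
                     * If length > max_non_overlap the sequence replicates
                     * itself, e.g. going back 1 byte with length 10 repeats
                     * the previous byte 10 times (run-length encoding), so a
                     * plain memcpy() of the whole length would be wrong.
                     */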
409 
410                 if (length <= max_non_overlap) {
411                         /* The byte sequence doesn't overlap, just copy it. */
412                         memcpy(dp_addr, dp_back_addr, length);
413 
414                         /* Advance destination pointer. */
415                         dp_addr += length;
416                 } else {
417                         /*
418                          * The byte sequence does overlap, copy non-overlapping
419                          * part and then do a slow byte by byte copy for the
420                          * overlapping part. Also, advance the destination
421                          * pointer.
422                          */
423                         memcpy(dp_addr, dp_back_addr, max_non_overlap);
424                         dp_addr += max_non_overlap;
425                         dp_back_addr += max_non_overlap;
426                         length -= max_non_overlap;
427                         while (length--)
428                                 *dp_addr++ = *dp_back_addr++;
429                 }
430 
431                 /* Advance source position and continue with the next token. */
432                 cb += 2;
433         }
434 
435         /* No tokens left in the current tag. Continue with the next tag. */
436         goto do_next_tag;
437 
438 return_overflow:
439         ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
440         goto return_error;
441 }
442 
443 /**
444  * ntfs_read_compressed_block - read a compressed block into the page cache
445  * @page:       locked page in the compression block(s) we need to read
446  *
447  * When we are called the page has already been verified to be locked and the
448  * attribute is known to be non-resident, not encrypted, but compressed.
449  *
450  * 1. Determine which compression block(s) @page is in.
451  * 2. Get hold of all pages corresponding to this/these compression block(s).
452  * 3. Read the (first) compression block.
453  * 4. Decompress it into the corresponding pages.
454  * 5. Throw the compressed data away and proceed to 3. for the next compression
455  *    block or return success if no more compression blocks left.
456  *
457  * Warning: We have to be careful what we do about existing pages. They might
458  * have been written to so that we would lose data if we were to just overwrite
459  * them with the out-of-date uncompressed data.
460  *
461  * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
462  * the end of the file I think. We need to detect this case and zero the out
463  * of bounds remainder of the page in question and mark it as handled. At the
464  * moment we would just return -EIO on such a page. This bug will only become
465  * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
466  * clusters so is probably not going to be seen by anyone. Still this should
467  * be fixed. (AIA)
468  *
469  * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
470  * handling sparse and compressed cbs. (AIA)
471  *
472  * FIXME: At the moment we don't do any zeroing out in the case that
473  * initialized_size is less than data_size. This should be safe because of the
474  * nature of the compression algorithm used. Just in case we check and output
475  * an error message in read inode if the two sizes are not equal for a
476  * compressed file. (AIA)
477  */
478 int ntfs_read_compressed_block(struct page *page)
479 {
480         loff_t i_size;
481         s64 initialized_size;
482         struct address_space *mapping = page->mapping;
483         ntfs_inode *ni = NTFS_I(mapping->host);
484         ntfs_volume *vol = ni->vol;
485         struct super_block *sb = vol->sb;
486         runlist_element *rl;
487         unsigned long flags, block_size = sb->s_blocksize;
488         unsigned char block_size_bits = sb->s_blocksize_bits;
489         u8 *cb, *cb_pos, *cb_end;
490         struct buffer_head **bhs;
491         unsigned long offset, index = page->index;
492         u32 cb_size = ni->itype.compressed.block_size;
493         u64 cb_size_mask = cb_size - 1UL;
494         VCN vcn;
495         LCN lcn;
496         /* The first wanted vcn (minimum alignment is PAGE_SIZE). */
497         VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
498                         vol->cluster_size_bits;
499         /*
500          * The first vcn after the last wanted vcn (minimum alignment is again
 501          * PAGE_SIZE).
502          */
503         VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
504                         & ~cb_size_mask) >> vol->cluster_size_bits;
505         /* Number of compression blocks (cbs) in the wanted vcn range. */
506         unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
507                         >> ni->itype.compressed.block_size_bits;
508         /*
509          * Number of pages required to store the uncompressed data from all
510          * compression blocks (cbs) overlapping @page. Due to alignment
511          * guarantees of start_vcn and end_vcn, no need to round up here.
512          */
513         unsigned int nr_pages = (end_vcn - start_vcn) <<
514                         vol->cluster_size_bits >> PAGE_SHIFT;
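            /*
             * Example, assuming 4kiB pages and clusters and the usual
             * 16-cluster cb (cb_size = 64kiB): page index 5 gives
             * start_vcn = 0, end_vcn = 16, nr_cbs = 1 and nr_pages = 16.
             */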
515         unsigned int xpage, max_page, cur_page, cur_ofs, i;
516         unsigned int cb_clusters, cb_max_ofs;
517         int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
518         struct page **pages;
519         unsigned char xpage_done = 0;
520 
521         ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
522                         "%i.", index, cb_size, nr_pages);
523         /*
524          * Bad things happen if we get here for anything that is not an
525          * unnamed $DATA attribute.
526          */
527         BUG_ON(ni->type != AT_DATA);
528         BUG_ON(ni->name_len);
529 
530         pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);
531 
532         /* Allocate memory to store the buffer heads we need. */
533         bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
534         bhs = kmalloc(bhs_size, GFP_NOFS);
535 
536         if (unlikely(!pages || !bhs)) {
537                 kfree(bhs);
538                 kfree(pages);
539                 unlock_page(page);
540                 ntfs_error(vol->sb, "Failed to allocate internal buffers.");
541                 return -ENOMEM;
542         }
543 
544         /*
545          * We have already been given one page, this is the one we must do.
546          * Once again, the alignment guarantees keep it simple.
547          */
548         offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
549         xpage = index - offset;
550         pages[xpage] = page;
551         /*
552          * The remaining pages need to be allocated and inserted into the page
553          * cache, alignment guarantees keep all the below much simpler. (-8
554          */
555         read_lock_irqsave(&ni->size_lock, flags);
556         i_size = i_size_read(VFS_I(ni));
557         initialized_size = ni->initialized_size;
558         read_unlock_irqrestore(&ni->size_lock, flags);
559         max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
560                         offset;
561         /* Is the page fully outside i_size? (truncate in progress) */
562         if (xpage >= max_page) {
563                 kfree(bhs);
564                 kfree(pages);
565                 zero_user(page, 0, PAGE_SIZE);
566                 ntfs_debug("Compressed read outside i_size - truncated?");
567                 SetPageUptodate(page);
568                 unlock_page(page);
569                 return 0;
570         }
571         if (nr_pages < max_page)
572                 max_page = nr_pages;
573         for (i = 0; i < max_page; i++, offset++) {
574                 if (i != xpage)
575                         pages[i] = grab_cache_page_nowait(mapping, offset);
576                 page = pages[i];
577                 if (page) {
578                         /*
579                          * We only (re)read the page if it isn't already read
580                          * in and/or dirty or we would be losing data or at
581                          * least wasting our time.
582                          */
583                         if (!PageDirty(page) && (!PageUptodate(page) ||
584                                         PageError(page))) {
585                                 ClearPageError(page);
586                                 kmap(page);
587                                 continue;
588                         }
589                         unlock_page(page);
590                         put_page(page);
591                         pages[i] = NULL;
592                 }
593         }
594 
595         /*
596          * We have the runlist, and all the destination pages we need to fill.
597          * Now read the first compression block.
598          */
599         cur_page = 0;
600         cur_ofs = 0;
601         cb_clusters = ni->itype.compressed.block_clusters;
602 do_next_cb:
603         nr_cbs--;
604         nr_bhs = 0;
605 
606         /* Read all cb buffer heads one cluster at a time. */
607         rl = NULL;
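            /*
             * Note that the loop initializer below advances start_vcn to the
             * end of this cb, so on exit vcn records how far through the cb
             * we got; this is used after the loop to classify the cb.
             */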
608         for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
609                         vcn++) {
610                 bool is_retry = false;
611 
612                 if (!rl) {
613 lock_retry_remap:
614                         down_read(&ni->runlist.lock);
615                         rl = ni->runlist.rl;
616                 }
617                 if (likely(rl != NULL)) {
618                         /* Seek to element containing target vcn. */
619                         while (rl->length && rl[1].vcn <= vcn)
620                                 rl++;
621                         lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
622                 } else
623                         lcn = LCN_RL_NOT_MAPPED;
624                 ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
625                                 (unsigned long long)vcn,
626                                 (unsigned long long)lcn);
627                 if (lcn < 0) {
628                         /*
629                          * When we reach the first sparse cluster we have
630                          * finished with the cb.
631                          */
632                         if (lcn == LCN_HOLE)
633                                 break;
634                         if (is_retry || lcn != LCN_RL_NOT_MAPPED)
635                                 goto rl_err;
636                         is_retry = true;
637                         /*
638                          * Attempt to map runlist, dropping lock for the
639                          * duration.
640                          */
641                         up_read(&ni->runlist.lock);
642                         if (!ntfs_map_runlist(ni, vcn))
643                                 goto lock_retry_remap;
644                         goto map_rl_err;
645                 }
646                 block = lcn << vol->cluster_size_bits >> block_size_bits;
647                 /* Read the lcn from device in chunks of block_size bytes. */
648                 max_block = block + (vol->cluster_size >> block_size_bits);
649                 do {
650                         ntfs_debug("block = 0x%x.", block);
651                         if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
652                                 goto getblk_err;
653                         nr_bhs++;
654                 } while (++block < max_block);
655         }
656 
657         /* Release the lock if we took it. */
658         if (rl)
659                 up_read(&ni->runlist.lock);
660 
661         /* Setup and initiate io on all buffer heads. */
662         for (i = 0; i < nr_bhs; i++) {
663                 struct buffer_head *tbh = bhs[i];
664 
665                 if (!trylock_buffer(tbh))
666                         continue;
667                 if (unlikely(buffer_uptodate(tbh))) {
668                         unlock_buffer(tbh);
669                         continue;
670                 }
671                 get_bh(tbh);
672                 tbh->b_end_io = end_buffer_read_sync;
673                 submit_bh(REQ_OP_READ, 0, tbh);
674         }
675 
676         /* Wait for io completion on all buffer heads. */
677         for (i = 0; i < nr_bhs; i++) {
678                 struct buffer_head *tbh = bhs[i];
679 
680                 if (buffer_uptodate(tbh))
681                         continue;
682                 wait_on_buffer(tbh);
683                 /*
684                  * We need an optimization barrier here, otherwise we start
685                  * hitting the below fixup code when accessing a loopback
686                  * mounted ntfs partition. This indicates either there is a
687                  * race condition in the loop driver or, more likely, gcc
688                  * overoptimises the code without the barrier and it doesn't
689                  * do the Right Thing(TM).
690                  */
691                 barrier();
692                 if (unlikely(!buffer_uptodate(tbh))) {
693                         ntfs_warning(vol->sb, "Buffer is unlocked but not "
694                                         "uptodate! Unplugging the disk queue "
695                                         "and rescheduling.");
696                         get_bh(tbh);
697                         io_schedule();
698                         put_bh(tbh);
699                         if (unlikely(!buffer_uptodate(tbh)))
700                                 goto read_err;
701                         ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
702                 }
703         }
704 
705         /*
706          * Get the compression buffer. We must not sleep any more
707          * until we are finished with it.
708          */
709         spin_lock(&ntfs_cb_lock);
710         cb = ntfs_compression_buffer;
711 
712         BUG_ON(!cb);
713 
714         cb_pos = cb;
715         cb_end = cb + cb_size;
716 
717         /* Copy the buffer heads into the contiguous buffer. */
718         for (i = 0; i < nr_bhs; i++) {
719                 memcpy(cb_pos, bhs[i]->b_data, block_size);
720                 cb_pos += block_size;
721         }
722 
 723         /* Just a precaution: a zero sb header cleanly terminates the cb. */
724         if (cb_pos + 2 <= cb + cb_size)
725                 *(u16*)cb_pos = 0;
726 
727         /* Reset cb_pos back to the beginning. */
728         cb_pos = cb;
729 
730         /* We now have both source (if present) and destination. */
731         ntfs_debug("Successfully read the compression block.");
732 
733         /* The last page and maximum offset within it for the current cb. */
734         cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
735         cb_max_ofs = cb_max_page & ~PAGE_MASK;
736         cb_max_page >>= PAGE_SHIFT;
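            /*
             * E.g. with 4kiB pages, cur_page = cur_ofs = 0 and a 64kiB cb
             * give cb_max_page = 16 and cb_max_ofs = 0, i.e. the cb ends
             * exactly on a page boundary.
             */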
737 
738         /* Catch end of file inside a compression block. */
739         if (cb_max_page > max_page)
740                 cb_max_page = max_page;
741 
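            /*
             * How far the cluster read loop got classifies the cb: breaking
             * at a hole on the very first cluster (vcn == start_vcn -
             * cb_clusters) means the cb is entirely sparse, while running to
             * completion (vcn == start_vcn) means every cluster is
             * allocated, which NTFS only does when the data is stored
             * uncompressed; anything in between is a compressed cb.
             */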
742         if (vcn == start_vcn - cb_clusters) {
743                 /* Sparse cb, zero out page range overlapping the cb. */
744                 ntfs_debug("Found sparse compression block.");
745                 /* We can sleep from now on, so we drop lock. */
746                 spin_unlock(&ntfs_cb_lock);
747                 if (cb_max_ofs)
748                         cb_max_page--;
749                 for (; cur_page < cb_max_page; cur_page++) {
750                         page = pages[cur_page];
751                         if (page) {
752                                 if (likely(!cur_ofs))
753                                         clear_page(page_address(page));
754                                 else
755                                         memset(page_address(page) + cur_ofs, 0,
756                                                         PAGE_SIZE -
757                                                         cur_ofs);
758                                 flush_dcache_page(page);
759                                 kunmap(page);
760                                 SetPageUptodate(page);
761                                 unlock_page(page);
762                                 if (cur_page == xpage)
763                                         xpage_done = 1;
764                                 else
765                                         put_page(page);
766                                 pages[cur_page] = NULL;
767                         }
768                         cb_pos += PAGE_SIZE - cur_ofs;
769                         cur_ofs = 0;
770                         if (cb_pos >= cb_end)
771                                 break;
772                 }
773                 /* If we have a partial final page, deal with it now. */
774                 if (cb_max_ofs && cb_pos < cb_end) {
775                         page = pages[cur_page];
776                         if (page)
777                                 memset(page_address(page) + cur_ofs, 0,
778                                                 cb_max_ofs - cur_ofs);
779                         /*
780                          * No need to update cb_pos at this stage:
781                          *      cb_pos += cb_max_ofs - cur_ofs;
782                          */
783                         cur_ofs = cb_max_ofs;
784                 }
785         } else if (vcn == start_vcn) {
786                 /* We can't sleep so we need two stages. */
787                 unsigned int cur2_page = cur_page;
788                 unsigned int cur_ofs2 = cur_ofs;
789                 u8 *cb_pos2 = cb_pos;
790 
791                 ntfs_debug("Found uncompressed compression block.");
792                 /* Uncompressed cb, copy it to the destination pages. */
793                 /*
794                  * TODO: As a big optimization, we could detect this case
795                  * before we read all the pages and use block_read_full_page()
796                  * on all full pages instead (we still have to treat partial
797                  * pages especially but at least we are getting rid of the
 798                  * synchronous io for the majority of pages.)
799                  * Or if we choose not to do the read-ahead/-behind stuff, we
800                  * could just return block_read_full_page(pages[xpage]) as long
801                  * as PAGE_SIZE <= cb_size.
802                  */
803                 if (cb_max_ofs)
804                         cb_max_page--;
805                 /* First stage: copy data into destination pages. */
806                 for (; cur_page < cb_max_page; cur_page++) {
807                         page = pages[cur_page];
808                         if (page)
809                                 memcpy(page_address(page) + cur_ofs, cb_pos,
810                                                 PAGE_SIZE - cur_ofs);
811                         cb_pos += PAGE_SIZE - cur_ofs;
812                         cur_ofs = 0;
813                         if (cb_pos >= cb_end)
814                                 break;
815                 }
816                 /* If we have a partial final page, deal with it now. */
817                 if (cb_max_ofs && cb_pos < cb_end) {
818                         page = pages[cur_page];
819                         if (page)
820                                 memcpy(page_address(page) + cur_ofs, cb_pos,
821                                                 cb_max_ofs - cur_ofs);
822                         cb_pos += cb_max_ofs - cur_ofs;
823                         cur_ofs = cb_max_ofs;
824                 }
825                 /* We can sleep from now on, so drop lock. */
826                 spin_unlock(&ntfs_cb_lock);
827                 /* Second stage: finalize pages. */
828                 for (; cur2_page < cb_max_page; cur2_page++) {
829                         page = pages[cur2_page];
830                         if (page) {
831                                 /*
832                                  * If we are outside the initialized size, zero
833                                  * the out of bounds page range.
834                                  */
835                                 handle_bounds_compressed_page(page, i_size,
836                                                 initialized_size);
837                                 flush_dcache_page(page);
838                                 kunmap(page);
839                                 SetPageUptodate(page);
840                                 unlock_page(page);
841                                 if (cur2_page == xpage)
842                                         xpage_done = 1;
843                                 else
844                                         put_page(page);
845                                 pages[cur2_page] = NULL;
846                         }
847                         cb_pos2 += PAGE_SIZE - cur_ofs2;
848                         cur_ofs2 = 0;
849                         if (cb_pos2 >= cb_end)
850                                 break;
851                 }
852         } else {
853                 /* Compressed cb, decompress it into the destination page(s). */
854                 unsigned int prev_cur_page = cur_page;
855 
856                 ntfs_debug("Found compressed compression block.");
857                 err = ntfs_decompress(pages, &cur_page, &cur_ofs,
858                                 cb_max_page, cb_max_ofs, xpage, &xpage_done,
859                                 cb_pos, cb_size - (cb_pos - cb), i_size,
860                                 initialized_size);
861                 /*
862                  * We can sleep from now on, lock already dropped by
863                  * ntfs_decompress().
864                  */
865                 if (err) {
866                         ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
867                                         "0x%lx with error code %i. Skipping "
868                                         "this compression block.",
869                                         ni->mft_no, -err);
870                         /* Release the unfinished pages. */
871                         for (; prev_cur_page < cur_page; prev_cur_page++) {
872                                 page = pages[prev_cur_page];
873                                 if (page) {
874                                         flush_dcache_page(page);
875                                         kunmap(page);
876                                         unlock_page(page);
877                                         if (prev_cur_page != xpage)
878                                                 put_page(page);
879                                         pages[prev_cur_page] = NULL;
880                                 }
881                         }
882                 }
883         }
884 
885         /* Release the buffer heads. */
886         for (i = 0; i < nr_bhs; i++)
887                 brelse(bhs[i]);
888 
889         /* Do we have more work to do? */
890         if (nr_cbs)
891                 goto do_next_cb;
892 
893         /* We no longer need the list of buffer heads. */
894         kfree(bhs);
895 
896         /* Clean up if we have any pages left. Should never happen. */
897         for (cur_page = 0; cur_page < max_page; cur_page++) {
898                 page = pages[cur_page];
899                 if (page) {
900                         ntfs_error(vol->sb, "Still have pages left! "
901                                         "Terminating them with extreme "
902                                         "prejudice.  Inode 0x%lx, page index "
903                                         "0x%lx.", ni->mft_no, page->index);
904                         flush_dcache_page(page);
905                         kunmap(page);
906                         unlock_page(page);
907                         if (cur_page != xpage)
908                                 put_page(page);
909                         pages[cur_page] = NULL;
910                 }
911         }
912 
913         /* We no longer need the list of pages. */
914         kfree(pages);
915 
916         /* If we have completed the requested page, we return success. */
917         if (likely(xpage_done))
918                 return 0;
919 
920         ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
921                         "EOVERFLOW" : (!err ? "EIO" : "unknown error"));
922         return err < 0 ? err : -EIO;
923 
924 read_err:
925         ntfs_error(vol->sb, "IO error while reading compressed data.");
926         /* Release the buffer heads. */
927         for (i = 0; i < nr_bhs; i++)
928                 brelse(bhs[i]);
929         goto err_out;
930 
931 map_rl_err:
932         ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
933                         "compression block.");
934         goto err_out;
935 
936 rl_err:
937         up_read(&ni->runlist.lock);
938         ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
939                         "compression block.");
940         goto err_out;
941 
942 getblk_err:
943         up_read(&ni->runlist.lock);
944         ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
945 
946 err_out:
947         kfree(bhs);
948         for (i = cur_page; i < max_page; i++) {
949                 page = pages[i];
950                 if (page) {
951                         flush_dcache_page(page);
952                         kunmap(page);
953                         unlock_page(page);
954                         if (i != xpage)
955                                 put_page(page);
956                 }
957         }
958         kfree(pages);
959         return -EIO;
960 }
961 
