/*
 * linux/fs/f2fs/crypto.c
 *
 * Copied from linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * This contains encryption functions for f2fs
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Remove ext4_encrypted_zeroout(),
 *	add f2fs_restore_and_release_control_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/f2fs_fs.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>

#include "f2fs.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *f2fs_bounce_page_pool;

static LIST_HEAD(f2fs_free_crypto_ctxs);
static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);

static struct workqueue_struct *f2fs_read_workqueue;
static DEFINE_MUTEX(crypto_init);

static struct kmem_cache *f2fs_crypto_ctx_cachep;
struct kmem_cache *f2fs_crypt_info_cachep;

/**
 * f2fs_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	}
}

/**
 * f2fs_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
{
	struct f2fs_crypto_ctx *ctx = NULL;
	unsigned long flags;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
					struct f2fs_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~F2FS_WRITE_PATH_FL;
	return ctx;
}
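
/*
 * Usage sketch (illustrative only, mirroring f2fs_decrypt_one() further down
 * in this file): callers pair f2fs_get_crypto_ctx() with
 * f2fs_release_crypto_ctx() and must handle the ERR_PTR() return, e.g.
 * -ENOKEY when no key has been set up for the inode:
 *
 *	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... perform one or more page crypto operations with ctx ...
 *	f2fs_release_crypto_ctx(ctx);
 */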

/*
 * Call f2fs_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct f2fs_crypto_ctx *ctx =
		container_of(work, struct f2fs_crypto_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = f2fs_decrypt(ctx, page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	f2fs_release_crypto_ctx(ctx);
	bio_put(bio);
}

void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(f2fs_read_workqueue, &ctx->r.work);
}
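
/*
 * Read-path sketch (hedged illustration of a caller, not the actual f2fs
 * read end_io code): a read bio completion handler hands the bio off to the
 * f2fs_read_workqueue rather than decrypting in interrupt context:
 *
 *	static void read_end_io(struct bio *bio, int err)
 *	{
 *		struct f2fs_crypto_ctx *ctx = bio->bi_private;
 *
 *		if (!err && ctx) {
 *			f2fs_end_io_crypto_work(ctx, bio);
 *			return;
 *		}
 *		... otherwise complete the pages without decryption ...
 *	}
 *
 * completion_pages() above then runs in process context, calls f2fs_decrypt()
 * on every segment, and finally releases both the context and the bio.
 */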

static void f2fs_crypto_destroy(void)
{
	struct f2fs_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
		kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
	if (f2fs_bounce_page_pool)
		mempool_destroy(f2fs_bounce_page_pool);
	f2fs_bounce_page_pool = NULL;
}

/**
 * f2fs_crypto_initialize() - Set up for f2fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_crypto_initialize(void)
{
	int i, res = -ENOMEM;

	if (f2fs_bounce_page_pool)
		return 0;

	mutex_lock(&crypto_init);
	if (f2fs_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct f2fs_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
	}

	/* must be allocated at the last step to avoid race condition above */
	f2fs_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!f2fs_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	f2fs_crypto_destroy();
	mutex_unlock(&crypto_init);
	return res;
}

/**
 * f2fs_exit_crypto() - Shutdown the f2fs encryption system
 */
void f2fs_exit_crypto(void)
{
	f2fs_crypto_destroy();

	if (f2fs_read_workqueue)
		destroy_workqueue(f2fs_read_workqueue);
	if (f2fs_crypto_ctx_cachep)
		kmem_cache_destroy(f2fs_crypto_ctx_cachep);
	if (f2fs_crypt_info_cachep)
		kmem_cache_destroy(f2fs_crypt_info_cachep);
}

int __init f2fs_init_crypto(void)
{
	int res = -ENOMEM;

	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue)
		goto fail;

	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
					SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypto_ctx_cachep)
		goto fail;

	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
					SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypt_info_cachep)
		goto fail;

	return 0;
fail:
	f2fs_exit_crypto();
	return res;
}
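
/*
 * Setup/teardown summary (descriptive note, not part of the original file):
 * f2fs_init_crypto() runs once at module init and only creates the read
 * workqueue and the two kmem caches; f2fs_crypto_initialize() is invoked
 * lazily the first time an encrypted file is accessed and preallocates the
 * free contexts and the bounce page mempool; f2fs_exit_crypto() undoes both
 * and tolerates a partially completed setup, which is why it also serves as
 * the failure path of f2fs_init_crypto().
 */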

void f2fs_restore_and_release_control_page(struct page **page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	f2fs_restore_control_page(bounce_page);
}

void f2fs_restore_control_page(struct page *data_page)
{
	struct f2fs_crypto_ctx *ctx =
		(struct f2fs_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	f2fs_release_crypto_ctx(ctx);
}

/**
 * f2fs_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
{
	struct f2fs_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	F2FS_DECRYPT = 0,
	F2FS_ENCRYPT,
} f2fs_direction_t;

static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
				struct inode *inode,
				f2fs_direction_t rw,
				pgoff_t index,
				struct page *src_page,
				struct page *dest_page)
{
	u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_F2FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		f2fs_crypt_complete, &ecr);

	BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			F2FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
					xts_tweak);
	if (rw == F2FS_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_ablkcipher_encrypt() returned %d\n",
				__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= F2FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * f2fs_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path.  The caller must call
 * f2fs_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *f2fs_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = f2fs_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto err_out;

	ctx->w.control_page = plaintext_page;
	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto err_out;
	}

	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

err_out:
	f2fs_release_crypto_ctx(ctx);
	return ciphertext_page;
}
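
/*
 * Write-path sketch (hedged illustration of a caller, following the
 * f2fs_encrypt() kernel-doc above rather than quoting the actual f2fs
 * writeback code): the bounce page returned by f2fs_encrypt() is what gets
 * written to disk, and the original (control) page is restored afterwards:
 *
 *	struct page *enc_page = f2fs_encrypt(inode, page);
 *
 *	if (IS_ERR(enc_page))
 *		return PTR_ERR(enc_page);
 *	... submit enc_page for write-out ...
 *	f2fs_restore_control_page(enc_page);
 *
 * f2fs_restore_control_page() clears the page private data, unlocks the
 * bounce page, and releases the encryption context.
 */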

/**
 * f2fs_decrypt() - Decrypts a page in-place
 * @ctx:  The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return f2fs_page_crypto(ctx, page->mapping->host,
				F2FS_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int f2fs_decrypt_one(struct inode *inode, struct page *page)
{
	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
	int ret;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = f2fs_decrypt(ctx, page);
	f2fs_release_crypto_ctx(ctx);
	return ret;
}

bool f2fs_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * f2fs_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == f2fs_encryption_key_size(mode))
		return size;
	return 0;
}
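
/*
 * Key-setup sketch (illustrative only): a policy/key setup path would use the
 * two helpers above to reject unsupported modes and mismatched key sizes,
 * e.g.
 *
 *	if (!f2fs_valid_contents_enc_mode(mode))
 *		return -EINVAL;
 *	if (!f2fs_validate_encryption_key_size(mode, size))
 *		return -EINVAL;
 */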