/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
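/*
 * Editorial sketch, not part of the original file: when a chunk cannot be
 * handled in place (see ablkcipher_next_slow() below), the data is bounced
 * through a kmalloc'd ablkcipher_buffer that is queued with
 * ablkcipher_queue_write().  A driver that completes the request
 * asynchronously is expected to call __ablkcipher_walk_complete() from its
 * completion path so the queued buffers are copied back into the
 * destination scatterlist, roughly as below (all mydrv_* names are
 * hypothetical):
 *
 *	static void mydrv_hw_done(struct mydrv_request_ctx *rctx, int err)
 *	{
 *		__ablkcipher_walk_complete(&rctx->walk);
 *		rctx->req->base.complete(&rctx->req->base, err);
 *	}
 */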
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
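/*
 * Editorial sketch, not part of the original file: the usual driver-side
 * loop over the walk API above, assuming the ablkcipher_walk_init() helper
 * from <crypto/internal/skcipher.h>.  mydrv_process_chunk() is hypothetical
 * and error handling is elided.
 *
 *	struct ablkcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		unsigned int done = mydrv_process_chunk(&walk, nbytes);
 *
 *		err = ablkcipher_walk_done(req, &walk, nbytes - done);
 *	}
 *
 * The third argument to ablkcipher_walk_done() is the number of bytes of
 * the current chunk left unprocessed (or a negative errno on failure).
 */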
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
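/*
 * Editorial sketch, not part of the original file: how an implementation
 * hooks into crypto_ablkcipher_type above.  All mydrv_* names, the priority
 * and the alignmask are placeholders for a hypothetical AES-CBC offload.
 *
 *	static struct crypto_alg mydrv_cbc_aes_alg = {
 *		.cra_name		= "cbc(aes)",
 *		.cra_driver_name	= "cbc-aes-mydrv",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 *					  CRYPTO_ALG_ASYNC,
 *		.cra_blocksize		= AES_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct mydrv_ctx),
 *		.cra_alignmask		= 0xf,
 *		.cra_type		= &crypto_ablkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.ablkcipher	= {
 *			.min_keysize	= AES_MIN_KEY_SIZE,
 *			.max_keysize	= AES_MAX_KEY_SIZE,
 *			.ivsize		= AES_BLOCK_SIZE,
 *			.setkey		= mydrv_aes_setkey,
 *			.encrypt	= mydrv_cbc_encrypt,
 *			.decrypt	= mydrv_cbc_decrypt,
 *		},
 *	};
 *
 * After crypto_register_alg(&mydrv_cbc_aes_alg), allocating a tfm for this
 * algorithm runs crypto_init_ablkcipher_ops(), which wires the generic
 * setkey() wrapper and the driver's encrypt/decrypt into crt_ablkcipher.
 */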
478 "yes" : "no"); 479 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 480 seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); 481 seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); 482 seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); 483 seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: "<built-in>"); 484 } 485 486 const struct crypto_type crypto_givcipher_type = { 487 .ctxsize = crypto_ablkcipher_ctxsize, 488 .init = crypto_init_givcipher_ops, 489 #ifdef CONFIG_PROC_FS 490 .show = crypto_givcipher_show, 491 #endif 492 .report = crypto_givcipher_report, 493 }; 494 EXPORT_SYMBOL_GPL(crypto_givcipher_type); 495