#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/hash.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
			  const struct ceph_crypto_key *src)
{
	memcpy(dst, src, sizeof(struct ceph_crypto_key));
	dst->key = kmemdup(src->key, src->len, GFP_NOFS);
	if (!dst->key)
		return -ENOMEM;
	return 0;
}

int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
	if (*p + sizeof(u16) + sizeof(key->created) +
	    sizeof(u16) + key->len > end)
		return -ERANGE;
	ceph_encode_16(p, key->type);
	ceph_encode_copy(p, &key->created, sizeof(key->created));
	ceph_encode_16(p, key->len);
	ceph_encode_copy(p, key->key, key->len);
	return 0;
}

int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}

int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey + inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}

#define AES_KEY_SIZE 16

static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}

static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
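 *
 * A linearly-mapped buffer always fits in a single sg entry, so
 * @prealloc_sg is used as-is.  A vmalloc'ed buffer gets one sg entry
 * per page, since its pages need not be physically contiguous: e.g. a
 * buffer of 2*PAGE_SIZE bytes starting 100 bytes into a page spans
 * three pages and yields three entries of lengths PAGE_SIZE - 100,
 * PAGE_SIZE and 100.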
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}

static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}

static int ceph_aes_encrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], prealloc_sg;
	struct sg_table sg_out;
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
	if (ret)
		goto out_tfm;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
		       pad, zero_padding, 1);
	*/
	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
				       src_len + zero_padding);
	if (ret < 0) {
		pr_err("ceph_aes_crypt failed %d\n", ret);
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_out);
out_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}

static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], prealloc_sg;
	struct sg_table sg_out;
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
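	/*
	 * Ceph uses PKCS#7-style padding: the plaintext is padded out to
	 * a whole 16-byte AES block with bytes that each hold the pad
	 * length.  An already aligned input still gains a full block of
	 * padding (zero_padding is 16, never 0, when the combined source
	 * length is a multiple of 16).
	 */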
	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
	if (ret)
		goto out_tfm;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
		       src1, src1_len, 1);
	print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
		       src2, src2_len, 1);
	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
		       pad, zero_padding, 1);
	*/
	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
				       src1_len + src2_len + zero_padding);
	if (ret < 0) {
		pr_err("ceph_aes_crypt2 failed %d\n", ret);
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_out);
out_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}

static int ceph_aes_decrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct sg_table sg_in;
	struct scatterlist sg_out[2], prealloc_sg;
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_out, 2);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));
	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
	if (ret)
		goto out_tfm;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/
	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		goto out_sg;
	}

	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		ret = -EPERM;  /* bad padding */
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_in);
out_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}

static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct sg_table sg_in;
	struct scatterlist sg_out[3], prealloc_sg;
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
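	/* last plaintext byte doubles as the pad length (PKCS#7-style) */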
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));
	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
	if (ret)
		goto out_tfm;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/
	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt2 failed %d\n", ret);
		goto out_sg;
	}

	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt2 got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		ret = -EPERM;  /* bad padding */
		goto out_sg;
	}

	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}
	/*
	print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
		       dst1, *dst1_len, 1);
	print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
		       dst2, *dst2_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_in);
out_tfm:
	crypto_free_blkcipher(tfm);
	return ret;
}

int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_decrypt2(struct ceph_crypto_key *secret,
		  void *dst1, size_t *dst1_len,
		  void *dst2, size_t *dst2_len,
		  const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1,
		  size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
					 src1, src1_len, src2, src2_len);

	default:
		return -EINVAL;
	}
}

static int ceph_key_instantiate(struct key *key,
				struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey;
	size_t datalen = prep->datalen;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen == 0 || datalen > 32767 || !prep->data)
		goto err;

	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	/* TODO ceph_crypto_key_decode should really take const input */
	p = (void *)prep->data;
	ret = ceph_crypto_key_decode(ckey, &p, (char *)prep->data + datalen);
	if (ret < 0)
		goto err_ckey;

	key->payload.data = ckey;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

static int ceph_key_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data;

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

struct key_type key_type_ceph = {
	.name		= "ceph",
	.instantiate	= ceph_key_instantiate,
	.match		= ceph_key_match,
	.destroy	= ceph_key_destroy,
};

int ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}
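
/*
 * Example usage (a sketch, not part of this file; assumes @secret was
 * decoded with ceph_crypto_key_decode() and has type CEPH_CRYPTO_AES):
 * a round trip through ceph_encrypt()/ceph_decrypt().  The ciphertext
 * buffer must leave room for up to a full 16-byte block of padding.
 *
 *	static int example_roundtrip(struct ceph_crypto_key *secret)
 *	{
 *		char plain[] = "hello";		// 6 bytes including NUL
 *		char cipher[32], check[32];	// 6 bytes pad out to 16
 *		size_t clen = sizeof(cipher), plen = sizeof(check);
 *		int ret;
 *
 *		ret = ceph_encrypt(secret, cipher, &clen, plain, sizeof(plain));
 *		if (ret)
 *			return ret;		// clen is now 16
 *		ret = ceph_decrypt(secret, check, &plen, cipher, clen);
 *		if (ret)
 *			return ret;		// plen is back to 6
 *		return memcmp(check, plain, plen) ? -EIO : 0;
 *	}
 */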