Linux/crypto/ablkcipher.c

/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, with completion
 * signalled via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

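/*
 * Illustrative sketch (not part of the original file): a driver that uses
 * the walk helpers exported below typically maps the request's scatterlists
 * chunk by chunk, roughly as follows.  ablkcipher_walk_init() and the walk
 * declarations live in <crypto/internal/skcipher.h>; real driver code
 * differs in the details.
 *
 *      struct ablkcipher_walk walk;
 *      int err;
 *
 *      ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *      err = ablkcipher_walk_phys(req, &walk);
 *      while (!err && walk.nbytes) {
 *              ... process up to walk.nbytes bytes from walk.src.page at
 *                  walk.src.offset into walk.dst.page at walk.dst.offset,
 *                  using walk.iv as the IV ...
 *              err = ablkcipher_walk_done(req, &walk, 0);
 *      }
 */
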
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

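/*
 * Bounce buffer queued up during a "slow" walk step.  @data holds the
 * processed block and is copied back to the saved destination position
 * @dst once the operation completes.
 */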
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

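/*
 * Flush all queued bounce buffers back to the destination scatterlist and
 * free them.  Called once the cipher operation has produced its output.
 */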
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

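/*
 * Advance the output walk past @n bytes that were handled via a bounce
 * buffer, crossing scatterlist entries as necessary.
 */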
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

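/*
 * Finish the current walk step.  @err is negative on failure; otherwise it
 * is the number of bytes the caller left unprocessed.  Flushes or advances
 * the scatterwalks, sets up the next chunk if any data remains and, on
 * completion, copies a bounced IV back into the request.
 */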
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
                ablkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                ablkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);
        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

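/*
 * Slow path: the current chunk is misaligned or does not cover a full block
 * within the current scatterlist entries, so process one block through an
 * aligned bounce buffer.  The input is copied into the buffer, which is then
 * queued for write-back to the destination and handed to the caller as both
 * source and destination.
 */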
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

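/*
 * The IV supplied with the request is not sufficiently aligned for the
 * algorithm, so copy it into a freshly allocated, aligned buffer.
 */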
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

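/*
 * Fast path: the current chunk satisfies the alignment requirements, so
 * expose the source and destination pages and offsets directly, with no
 * copying.
 */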
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

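/*
 * Set up the next chunk of the walk: clamp the remaining length to what is
 * contiguous in both the source and destination scatterlists and pick either
 * the fast in-place path or the slow bounce-buffer path.
 */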
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

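/*
 * Start a new walk: refuse to run in hard-IRQ context, take the IV from the
 * request (bouncing it to an aligned buffer if necessary) and position the
 * scatterwalks at the start of the source and destination scatterlists.
 */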
static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

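/*
 * Begin walking a request for a driver that wants page/offset (physical)
 * addresses.  The caller processes up to walk->nbytes bytes per step and
 * calls ablkcipher_walk_done() after each one.
 */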
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

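/*
 * The caller's key buffer does not satisfy the algorithm's alignment
 * requirement, so copy the key into a temporary aligned buffer before
 * handing it to the algorithm's ->setkey(), then wipe and free the copy.
 */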
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

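/*
 * Generic setkey wrapper: validate the key length against the algorithm's
 * limits and bounce the key through an aligned buffer if needed.
 */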
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

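/*
 * Report ablkcipher parameters to user space via the crypto_user netlink
 * interface.
 */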
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

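/* Show ablkcipher algorithm details in /proc/crypto. */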
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

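/*
 * Same as crypto_init_ablkcipher_ops(), but for the givcipher type
 * (ablkciphers with a built-in IV generator).  Algorithms flagged with
 * CRYPTO_ALG_GENIV supply their own setkey; everything else goes through
 * the generic wrapper above.
 */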
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
