Linux/arch/x86/crypto/blowfish_glue.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue Code for assembler optimized version of Blowfish
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <crypto/algapi.h>
#include <crypto/blowfish.h>
#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>

/* regular block cipher functions */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
                                   bool xor);
asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);

/* 4-way parallel cipher functions */
asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                        const u8 *src, bool xor);
asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                      const u8 *src);

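/*
 * Thin wrappers around the assembler primitives (implemented in the
 * accompanying x86_64 assembly file). As used by this glue code, the 'xor'
 * flag selects whether the encrypted block replaces dst or is XORed into it
 * (dst ^= E(src)); the XOR variants are what the CTR code below relies on to
 * apply the keystream directly onto the data.
 */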
static inline void blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src)
{
        __blowfish_enc_blk(ctx, dst, src, false);
}

static inline void blowfish_enc_blk_xor(struct bf_ctx *ctx, u8 *dst,
                                        const u8 *src)
{
        __blowfish_enc_blk(ctx, dst, src, true);
}

static inline void blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
                                         const u8 *src)
{
        __blowfish_enc_blk_4way(ctx, dst, src, false);
}

static inline void blowfish_enc_blk_xor_4way(struct bf_ctx *ctx, u8 *dst,
                                      const u8 *src)
{
        __blowfish_enc_blk_4way(ctx, dst, src, true);
}

static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}

static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}

static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
                                    const u8 *key, unsigned int keylen)
{
        return blowfish_setkey(&tfm->base, key, keylen);
}

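/*
 * ECB helper: the skcipher walk hands back contiguous virtual-address
 * chunks. Within each chunk, blocks are processed four at a time with the
 * 4-way assembler routine while enough data remains, and any tail is
 * finished one block at a time.
 */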
static int ecb_crypt(struct skcipher_request *req,
                     void (*fn)(struct bf_ctx *, u8 *, const u8 *),
                     void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
        unsigned int bsize = BF_BLOCK_SIZE;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                u8 *wsrc = walk.src.virt.addr;
                u8 *wdst = walk.dst.virt.addr;

                /* Process four block batch */
                if (nbytes >= bsize * 4) {
                        do {
                                fn_4way(ctx, wdst, wsrc);

                                wsrc += bsize * 4;
                                wdst += bsize * 4;
                                nbytes -= bsize * 4;
                        } while (nbytes >= bsize * 4);

                        if (nbytes < bsize)
                                goto done;
                }

                /* Handle leftovers */
                do {
                        fn(ctx, wdst, wsrc);

                        wsrc += bsize;
                        wdst += bsize;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

done:
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
        return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
}

static int ecb_decrypt(struct skcipher_request *req)
{
        return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
}

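/*
 * CBC encryption is inherently serial: each plaintext block is XORed with
 * the previous ciphertext block before being encrypted, so the 4-way path
 * cannot be used here and blocks are handled one at a time.
 */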
static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
                                  struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 *iv = (u64 *)walk->iv;

        do {
                *dst = *src ^ *iv;
                blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        *(u64 *)walk->iv = *iv;
        return nbytes;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_encrypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

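/*
 * CBC decryption can be parallelized, since every chaining value is an
 * already-available ciphertext block. Blocks are processed from the end of
 * the chunk backwards so the operation also works in place: the 4-way batch
 * saves the ciphertexts it still needs as IVs in ivs[] before the assembler
 * routine may overwrite them.
 */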
static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
                                  struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 ivs[4 - 1];
        u64 last_iv;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        /* Process four block batch */
        if (nbytes >= bsize * 4) {
                do {
                        nbytes -= bsize * 4 - bsize;
                        src -= 4 - 1;
                        dst -= 4 - 1;

                        ivs[0] = src[0];
                        ivs[1] = src[1];
                        ivs[2] = src[2];

                        blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src);

                        dst[1] ^= ivs[0];
                        dst[2] ^= ivs[1];
                        dst[3] ^= ivs[2];

                        nbytes -= bsize;
                        if (nbytes < bsize)
                                goto done;

                        *dst ^= *(src - 1);
                        src -= 1;
                        dst -= 1;
                } while (nbytes >= bsize * 4);
        }

        /* Handle leftovers */
        for (;;) {
                blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

                nbytes -= bsize;
                if (nbytes < bsize)
                        break;

                *dst ^= *(src - 1);
                src -= 1;
                dst -= 1;
        }

done:
        *dst ^= *(u64 *)walk->iv;
        *(u64 *)walk->iv = last_iv;

        return nbytes;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                nbytes = __cbc_decrypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

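/*
 * CTR tail: for a final chunk shorter than one Blowfish block, encrypt the
 * counter into a temporary keystream buffer, XOR only the remaining bytes
 * into the destination, then bump the counter.
 */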
static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[BF_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        blowfish_enc_blk(ctx, keystream, ctrblk);
        crypto_xor_cpy(dst, keystream, src, nbytes);

        crypto_inc(ctrblk, BF_BLOCK_SIZE);
}

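/*
 * Full CTR blocks: the 64-bit big-endian counter from walk->iv is expanded
 * into per-block counter values, and the encrypt-and-xor routines apply the
 * resulting keystream directly onto dst (which is first seeded with the
 * plaintext when the operation is not already in place).
 */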
static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
        unsigned int bsize = BF_BLOCK_SIZE;
        unsigned int nbytes = walk->nbytes;
        u64 *src = (u64 *)walk->src.virt.addr;
        u64 *dst = (u64 *)walk->dst.virt.addr;
        u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
        __be64 ctrblocks[4];

        /* Process four block batch */
        if (nbytes >= bsize * 4) {
                do {
                        if (dst != src) {
                                dst[0] = src[0];
                                dst[1] = src[1];
                                dst[2] = src[2];
                                dst[3] = src[3];
                        }

                        /* create ctrblks for parallel encrypt */
                        ctrblocks[0] = cpu_to_be64(ctrblk++);
                        ctrblocks[1] = cpu_to_be64(ctrblk++);
                        ctrblocks[2] = cpu_to_be64(ctrblk++);
                        ctrblocks[3] = cpu_to_be64(ctrblk++);

                        blowfish_enc_blk_xor_4way(ctx, (u8 *)dst,
                                                  (u8 *)ctrblocks);

                        src += 4;
                        dst += 4;
                } while ((nbytes -= bsize * 4) >= bsize * 4);

                if (nbytes < bsize)
                        goto done;
        }

        /* Handle leftovers */
        do {
                if (dst != src)
                        *dst = *src;

                ctrblocks[0] = cpu_to_be64(ctrblk++);

                blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks);

                src += 1;
                dst += 1;
        } while ((nbytes -= bsize) >= bsize);

done:
        *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
        return nbytes;
}

static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
                nbytes = __ctr_crypt(ctx, &walk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        if (nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

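/*
 * The single-block cipher is exposed through the old-style crypto_alg
 * interface and the ECB/CBC/CTR modes through skcipher_alg. Their priority
 * values (200 and 300) are higher than those of the generic C Blowfish
 * implementation, so the crypto core prefers these versions when the module
 * is loaded.
 */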
static struct crypto_alg bf_cipher_alg = {
        .cra_name               = "blowfish",
        .cra_driver_name        = "blowfish-asm",
        .cra_priority           = 200,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = BF_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct bf_ctx),
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize        = BF_MIN_KEY_SIZE,
                        .cia_max_keysize        = BF_MAX_KEY_SIZE,
                        .cia_setkey             = blowfish_setkey,
                        .cia_encrypt            = blowfish_encrypt,
                        .cia_decrypt            = blowfish_decrypt,
                }
        }
};

static struct skcipher_alg bf_skcipher_algs[] = {
        {
                .base.cra_name          = "ecb(blowfish)",
                .base.cra_driver_name   = "ecb-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = BF_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = ecb_encrypt,
                .decrypt                = ecb_decrypt,
        }, {
                .base.cra_name          = "cbc(blowfish)",
                .base.cra_driver_name   = "cbc-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = BF_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .ivsize                 = BF_BLOCK_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = cbc_encrypt,
                .decrypt                = cbc_decrypt,
        }, {
                .base.cra_name          = "ctr(blowfish)",
                .base.cra_driver_name   = "ctr-blowfish-asm",
                .base.cra_priority      = 300,
                .base.cra_blocksize     = 1,
                .base.cra_ctxsize       = sizeof(struct bf_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = BF_MIN_KEY_SIZE,
                .max_keysize            = BF_MAX_KEY_SIZE,
                .ivsize                 = BF_BLOCK_SIZE,
                .chunksize              = BF_BLOCK_SIZE,
                .setkey                 = blowfish_setkey_skcipher,
                .encrypt                = ctr_crypt,
                .decrypt                = ctr_crypt,
        },
};

static bool is_blacklisted_cpu(void)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return false;

        if (boot_cpu_data.x86 == 0x0f) {
                /*
                 * On Pentium 4, blowfish-x86_64 is slower than the generic C
                 * implementation because it uses 64-bit rotates, which are
                 * really slow on P4. Therefore blacklist P4s.
                 */
                return true;
        }

        return false;
}

static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

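/*
 * Registration: unless force=1 is passed, refuse to load on blacklisted
 * CPUs. The single-block cipher is registered first; if registering the
 * skcipher modes then fails, it is unregistered again so the module never
 * stays half-registered.
 */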
static int __init init(void)
{
        int err;

        if (!force && is_blacklisted_cpu()) {
                printk(KERN_INFO
                        "blowfish-x86_64: performance on this CPU "
                        "would be suboptimal: disabling "
                        "blowfish-x86_64.\n");
                return -ENODEV;
        }

        err = crypto_register_alg(&bf_cipher_alg);
        if (err)
                return err;

        err = crypto_register_skciphers(bf_skcipher_algs,
                                        ARRAY_SIZE(bf_skcipher_algs));
        if (err)
                crypto_unregister_alg(&bf_cipher_alg);

        return err;
}

static void __exit fini(void)
{
        crypto_unregister_alg(&bf_cipher_alg);
        crypto_unregister_skciphers(bf_skcipher_algs,
                                    ARRAY_SIZE(bf_skcipher_algs));
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("blowfish");
MODULE_ALIAS_CRYPTO("blowfish-asm");

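/*
 * Usage sketch (illustrative, not part of the original file): in-kernel
 * users reach this driver through the generic skcipher API rather than by
 * calling it directly, e.g.:
 *
 *      struct crypto_skcipher *tfm;
 *
 *      tfm = crypto_alloc_skcipher("cbc(blowfish)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_skcipher_setkey(tfm, key, keylen);
 *      ...
 *      crypto_free_skcipher(tfm);
 *
 * If this module is loaded (and the CPU is not blacklisted), the request is
 * served by "cbc-blowfish-asm" because of its higher priority; otherwise the
 * crypto core falls back to the generic Blowfish implementation.
 */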
