~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/crypto/aesni-intel_glue.c

Version: ~ [ linux-5.6 ] ~ [ linux-5.5.13 ] ~ [ linux-5.4.28 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.113 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.174 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.217 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.217 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.82 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Support for Intel AES-NI instructions. This file contains glue
  3  * code, the real AES implementation is in intel-aes_asm.S.
  4  *
  5  * Copyright (C) 2008, Intel Corp.
  6  *    Author: Huang Ying <ying.huang@intel.com>
  7  *
  8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  9  * interface for 64-bit kernels.
 10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 12  *             Tadeusz Struk (tadeusz.struk@intel.com)
 13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 14  *    Copyright (c) 2010, Intel Corporation.
 15  *
 16  * This program is free software; you can redistribute it and/or modify
 17  * it under the terms of the GNU General Public License as published by
 18  * the Free Software Foundation; either version 2 of the License, or
 19  * (at your option) any later version.
 20  */
 21 
 22 #include <linux/hardirq.h>
 23 #include <linux/types.h>
 24 #include <linux/crypto.h>
 25 #include <linux/module.h>
 26 #include <linux/err.h>
 27 #include <crypto/algapi.h>
 28 #include <crypto/aes.h>
 29 #include <crypto/cryptd.h>
 30 #include <crypto/ctr.h>
 31 #include <crypto/b128ops.h>
 32 #include <crypto/lrw.h>
 33 #include <crypto/xts.h>
 34 #include <asm/cpu_device_id.h>
 35 #include <asm/fpu/api.h>
 36 #include <asm/crypto/aes.h>
 37 #include <crypto/ablk_helper.h>
 38 #include <crypto/scatterwalk.h>
 39 #include <crypto/internal/aead.h>
 40 #include <linux/workqueue.h>
 41 #include <linux/spinlock.h>
 42 #ifdef CONFIG_X86_64
 43 #include <asm/crypto/glue_helper.h>
 44 #endif
 45 
 46 
 47 #define AESNI_ALIGN     16
 48 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
 49 #define RFC4106_HASH_SUBKEY_SIZE 16
 50 
/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	/* hash subkey handed to the GCM asm routines; 16-byte aligned as
	 * required by those routines */
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	/* expanded AES key schedule, also 16-byte aligned for the asm */
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	/* NOTE(review): presumably the RFC4106 salt (key bytes beyond the
	 * AES key) — confirm against the rfc4106 setkey path */
	u8 nonce[4];
};
 61 
/* Completion bookkeeping for the async cipher request used to derive the
 * GCM hash subkey (see rfc4106_set_hash_subkey_done()). */
struct aesni_gcm_set_hash_subkey_result {
	int err;			/* final status from the request */
	struct completion completion;	/* signalled on final completion */
};
 66 
/* Per-request scratch state for the hash-subkey derivation. */
struct aesni_hash_subkey_req_data {
	u8 iv[16];					/* IV buffer for the request */
	struct aesni_gcm_set_hash_subkey_result result;	/* completion + error */
	struct scatterlist sg;				/* single-entry scatterlist */
};
 72 
/* LRW tfm context: the LRW table plus raw storage for the AES key
 * schedule.  raw_aes_ctx is over-sized by AESNI_ALIGN - 1 so aes_ctx()
 * can return a 16-byte-aligned crypto_aes_ctx inside it. */
struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
 77 
/* XTS tfm context: raw storage for the tweak and data key schedules.
 * Each buffer is over-sized by AESNI_ALIGN - 1 so aes_ctx() can return a
 * 16-byte-aligned crypto_aes_ctx inside it. */
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};
 82 
 83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 84                              unsigned int key_len);
 85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
 86                           const u8 *in);
 87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
 88                           const u8 *in);
 89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
 90                               const u8 *in, unsigned int len);
 91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
 92                               const u8 *in, unsigned int len);
 93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 94                               const u8 *in, unsigned int len, u8 *iv);
 95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 96                               const u8 *in, unsigned int len, u8 *iv);
 97 
 98 int crypto_fpu_init(void);
 99 void crypto_fpu_exit(void);
100 
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
103 
104 #ifdef CONFIG_X86_64
105 
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107                               const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109                               const u8 *in, unsigned int len, u8 *iv);
110 
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112                                  const u8 *in, bool enc, u8 *iv);
113 
114 /* asmlinkage void aesni_gcm_enc()
115  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
116  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117  * const u8 *in, Plaintext input
118  * unsigned long plaintext_len, Length of data in bytes for encryption.
119  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
121  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123  * const u8 *aad, Additional Authentication Data (AAD)
124  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125  *          is going to be 8 or 12 bytes
126  * u8 *auth_tag, Authenticated Tag output.
127  * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128  *          Valid values are 16 (most likely), 12 or 8.
129  */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131                         const u8 *in, unsigned long plaintext_len, u8 *iv,
132                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133                         u8 *auth_tag, unsigned long auth_tag_len);
134 
135 /* asmlinkage void aesni_gcm_dec()
136  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137  * u8 *out, Plaintext output. Decrypt in-place is allowed.
138  * const u8 *in, Ciphertext input
139  * unsigned long ciphertext_len, Length of data in bytes for decryption.
140  * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141  *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
142  *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144  * const u8 *aad, Additional Authentication Data (AAD)
145  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146  * to be 8 or 12 bytes
147  * u8 *auth_tag, Authenticated Tag output.
148  * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149  * Valid values are 16 (most likely), 12 or 8.
150  */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
153                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154                         u8 *auth_tag, unsigned long auth_tag_len);
155 
156 
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159                 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161                 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163                 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165  * asmlinkage void aesni_gcm_precomp_avx_gen2()
166  * gcm_data *my_ctx_data, context data
167  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
168  */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170 
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172                         const u8 *in, unsigned long plaintext_len, u8 *iv,
173                         const u8 *aad, unsigned long aad_len,
174                         u8 *auth_tag, unsigned long auth_tag_len);
175 
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
178                         const u8 *aad, unsigned long aad_len,
179                         u8 *auth_tag, unsigned long auth_tag_len);
180 
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182                         const u8 *in, unsigned long plaintext_len, u8 *iv,
183                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184                         u8 *auth_tag, unsigned long auth_tag_len)
185 {
186         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
187         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
188                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189                                 aad_len, auth_tag, auth_tag_len);
190         } else {
191                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193                                         aad_len, auth_tag, auth_tag_len);
194         }
195 }
196 
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len)
201 {
202         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
203         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
204                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205                                 aad_len, auth_tag, auth_tag_len);
206         } else {
207                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209                                         aad_len, auth_tag, auth_tag_len);
210         }
211 }
212 #endif
213 
214 #ifdef CONFIG_AS_AVX2
215 /*
216  * asmlinkage void aesni_gcm_precomp_avx_gen4()
217  * gcm_data *my_ctx_data, context data
218  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
219  */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221 
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223                         const u8 *in, unsigned long plaintext_len, u8 *iv,
224                         const u8 *aad, unsigned long aad_len,
225                         u8 *auth_tag, unsigned long auth_tag_len);
226 
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
229                         const u8 *aad, unsigned long aad_len,
230                         u8 *auth_tag, unsigned long auth_tag_len);
231 
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233                         const u8 *in, unsigned long plaintext_len, u8 *iv,
234                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235                         u8 *auth_tag, unsigned long auth_tag_len)
236 {
237        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
238         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
239                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240                                 aad_len, auth_tag, auth_tag_len);
241         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244                                         aad_len, auth_tag, auth_tag_len);
245         } else {
246                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248                                         aad_len, auth_tag, auth_tag_len);
249         }
250 }
251 
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len)
256 {
257        struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
258         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
259                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260                                 aad, aad_len, auth_tag, auth_tag_len);
261         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264                                         aad_len, auth_tag, auth_tag_len);
265         } else {
266                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268                                         aad_len, auth_tag, auth_tag_len);
269         }
270 }
271 #endif
272 
/* Dispatch pointers for the GCM enc/dec implementation.  NOTE(review):
 * assigned elsewhere in this file (not visible in this chunk) —
 * presumably at module init based on available CPU features; confirm. */
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);
282 
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286         unsigned long align = AESNI_ALIGN;
287 
288         if (align <= crypto_tfm_ctx_alignment())
289                 align = 1;
290         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
291 }
292 #endif
293 
294 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
295 {
296         unsigned long addr = (unsigned long)raw_ctx;
297         unsigned long align = AESNI_ALIGN;
298 
299         if (align <= crypto_tfm_ctx_alignment())
300                 align = 1;
301         return (struct crypto_aes_ctx *)ALIGN(addr, align);
302 }
303 
304 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
305                               const u8 *in_key, unsigned int key_len)
306 {
307         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
308         u32 *flags = &tfm->crt_flags;
309         int err;
310 
311         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
312             key_len != AES_KEYSIZE_256) {
313                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
314                 return -EINVAL;
315         }
316 
317         if (!irq_fpu_usable())
318                 err = crypto_aes_expand_key(ctx, in_key, key_len);
319         else {
320                 kernel_fpu_begin();
321                 err = aesni_set_key(ctx, in_key, key_len);
322                 kernel_fpu_end();
323         }
324 
325         return err;
326 }
327 
/* setkey entry point for the plain cipher: the key schedule lives
 * directly in the tfm ctx area (aligned inside aes_set_key_common()). */
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
333 
334 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
335 {
336         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
337 
338         if (!irq_fpu_usable())
339                 crypto_aes_encrypt_x86(ctx, dst, src);
340         else {
341                 kernel_fpu_begin();
342                 aesni_enc(ctx, dst, src);
343                 kernel_fpu_end();
344         }
345 }
346 
347 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
348 {
349         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
350 
351         if (!irq_fpu_usable())
352                 crypto_aes_decrypt_x86(ctx, dst, src);
353         else {
354                 kernel_fpu_begin();
355                 aesni_dec(ctx, dst, src);
356                 kernel_fpu_end();
357         }
358 }
359 
/* Encrypt one block with no FPU-usability check.  NOTE(review): calls
 * aesni_enc() directly, so callers presumably already run inside an FPU
 * section (kernel_fpu_begin()) — confirm at the call sites. */
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}
366 
/* Decrypt one block with no FPU-usability check; counterpart of
 * __aes_encrypt() — same caller-holds-FPU assumption applies. */
static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}
373 
374 static int ecb_encrypt(struct blkcipher_desc *desc,
375                        struct scatterlist *dst, struct scatterlist *src,
376                        unsigned int nbytes)
377 {
378         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
379         struct blkcipher_walk walk;
380         int err;
381 
382         blkcipher_walk_init(&walk, dst, src, nbytes);
383         err = blkcipher_walk_virt(desc, &walk);
384         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
385 
386         kernel_fpu_begin();
387         while ((nbytes = walk.nbytes)) {
388                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
389                               nbytes & AES_BLOCK_MASK);
390                 nbytes &= AES_BLOCK_SIZE - 1;
391                 err = blkcipher_walk_done(desc, &walk, nbytes);
392         }
393         kernel_fpu_end();
394 
395         return err;
396 }
397 
398 static int ecb_decrypt(struct blkcipher_desc *desc,
399                        struct scatterlist *dst, struct scatterlist *src,
400                        unsigned int nbytes)
401 {
402         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
403         struct blkcipher_walk walk;
404         int err;
405 
406         blkcipher_walk_init(&walk, dst, src, nbytes);
407         err = blkcipher_walk_virt(desc, &walk);
408         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
409 
410         kernel_fpu_begin();
411         while ((nbytes = walk.nbytes)) {
412                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
413                               nbytes & AES_BLOCK_MASK);
414                 nbytes &= AES_BLOCK_SIZE - 1;
415                 err = blkcipher_walk_done(desc, &walk, nbytes);
416         }
417         kernel_fpu_end();
418 
419         return err;
420 }
421 
422 static int cbc_encrypt(struct blkcipher_desc *desc,
423                        struct scatterlist *dst, struct scatterlist *src,
424                        unsigned int nbytes)
425 {
426         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
427         struct blkcipher_walk walk;
428         int err;
429 
430         blkcipher_walk_init(&walk, dst, src, nbytes);
431         err = blkcipher_walk_virt(desc, &walk);
432         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
433 
434         kernel_fpu_begin();
435         while ((nbytes = walk.nbytes)) {
436                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437                               nbytes & AES_BLOCK_MASK, walk.iv);
438                 nbytes &= AES_BLOCK_SIZE - 1;
439                 err = blkcipher_walk_done(desc, &walk, nbytes);
440         }
441         kernel_fpu_end();
442 
443         return err;
444 }
445 
446 static int cbc_decrypt(struct blkcipher_desc *desc,
447                        struct scatterlist *dst, struct scatterlist *src,
448                        unsigned int nbytes)
449 {
450         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
451         struct blkcipher_walk walk;
452         int err;
453 
454         blkcipher_walk_init(&walk, dst, src, nbytes);
455         err = blkcipher_walk_virt(desc, &walk);
456         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
457 
458         kernel_fpu_begin();
459         while ((nbytes = walk.nbytes)) {
460                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
461                               nbytes & AES_BLOCK_MASK, walk.iv);
462                 nbytes &= AES_BLOCK_SIZE - 1;
463                 err = blkcipher_walk_done(desc, &walk, nbytes);
464         }
465         kernel_fpu_end();
466 
467         return err;
468 }
469 
470 #ifdef CONFIG_X86_64
471 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
472                             struct blkcipher_walk *walk)
473 {
474         u8 *ctrblk = walk->iv;
475         u8 keystream[AES_BLOCK_SIZE];
476         u8 *src = walk->src.virt.addr;
477         u8 *dst = walk->dst.virt.addr;
478         unsigned int nbytes = walk->nbytes;
479 
480         aesni_enc(ctx, keystream, ctrblk);
481         crypto_xor(keystream, src, nbytes);
482         memcpy(dst, keystream, nbytes);
483         crypto_inc(ctrblk, AES_BLOCK_SIZE);
484 }
485 
486 #ifdef CONFIG_AS_AVX
487 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
488                               const u8 *in, unsigned int len, u8 *iv)
489 {
490         /*
491          * based on key length, override with the by8 version
492          * of ctr mode encryption/decryption for improved performance
493          * aes_set_key_common() ensures that key length is one of
494          * {128,192,256}
495          */
496         if (ctx->key_length == AES_KEYSIZE_128)
497                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
498         else if (ctx->key_length == AES_KEYSIZE_192)
499                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
500         else
501                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
502 }
503 #endif
504 
505 static int ctr_crypt(struct blkcipher_desc *desc,
506                      struct scatterlist *dst, struct scatterlist *src,
507                      unsigned int nbytes)
508 {
509         struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
510         struct blkcipher_walk walk;
511         int err;
512 
513         blkcipher_walk_init(&walk, dst, src, nbytes);
514         err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
515         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
516 
517         kernel_fpu_begin();
518         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
519                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
520                                       nbytes & AES_BLOCK_MASK, walk.iv);
521                 nbytes &= AES_BLOCK_SIZE - 1;
522                 err = blkcipher_walk_done(desc, &walk, nbytes);
523         }
524         if (walk.nbytes) {
525                 ctr_crypt_final(ctx, &walk);
526                 err = blkcipher_walk_done(desc, &walk, 0);
527         }
528         kernel_fpu_end();
529 
530         return err;
531 }
532 #endif
533 
/* Bind the async "ecb(aes)" tfm to the internal synchronous driver. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}
538 
/* Bind the async "cbc(aes)" tfm to the internal synchronous driver. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}
543 
544 #ifdef CONFIG_X86_64
/* Bind the async "ctr(aes)" tfm to the internal synchronous driver. */
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
549 
550 #endif
551 
552 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
/* Bind the async "pcbc(aes)" tfm to the generic pcbc template wrapped
 * around the internal single-block driver, inside an fpu() wrapper. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
557 #endif
558 
/* crypt_fn for lrw/xts: ECB-encrypt nbytes of blks in place. */
static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}
563 
/* crypt_fn for lrw/xts: ECB-decrypt nbytes of blks in place. */
static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}
568 
569 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
570                             unsigned int keylen)
571 {
572         struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
573         int err;
574 
575         err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
576                                  keylen - AES_BLOCK_SIZE);
577         if (err)
578                 return err;
579 
580         return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
581 }
582 
/* tfm destructor: release the LRW table state set up in
 * lrw_aesni_setkey(). */
static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}
589 
590 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
591                        struct scatterlist *src, unsigned int nbytes)
592 {
593         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
594         be128 buf[8];
595         struct lrw_crypt_req req = {
596                 .tbuf = buf,
597                 .tbuflen = sizeof(buf),
598 
599                 .table_ctx = &ctx->lrw_table,
600                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
601                 .crypt_fn = lrw_xts_encrypt_callback,
602         };
603         int ret;
604 
605         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
606 
607         kernel_fpu_begin();
608         ret = lrw_crypt(desc, dst, src, nbytes, &req);
609         kernel_fpu_end();
610 
611         return ret;
612 }
613 
614 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
615                        struct scatterlist *src, unsigned int nbytes)
616 {
617         struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
618         be128 buf[8];
619         struct lrw_crypt_req req = {
620                 .tbuf = buf,
621                 .tbuflen = sizeof(buf),
622 
623                 .table_ctx = &ctx->lrw_table,
624                 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
625                 .crypt_fn = lrw_xts_decrypt_callback,
626         };
627         int ret;
628 
629         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
630 
631         kernel_fpu_begin();
632         ret = lrw_crypt(desc, dst, src, nbytes, &req);
633         kernel_fpu_end();
634 
635         return ret;
636 }
637 
638 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
639                             unsigned int keylen)
640 {
641         struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
642         int err;
643 
644         err = xts_check_key(tfm, key, keylen);
645         if (err)
646                 return err;
647 
648         /* first half of xts-key is for crypt */
649         err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
650         if (err)
651                 return err;
652 
653         /* second half of xts-key is for tweak */
654         return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
655                                   keylen / 2);
656 }
657 
658 
/* tweak_fn for XTS: encrypt one block with the tweak key schedule
 * (passed as ctx) to produce the initial tweak. */
static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}
663 
664 #ifdef CONFIG_X86_64
665 
/* Single-block XTS encryption adapter for the glue helper. */
static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}
670 
/* Single-block XTS decryption adapter for the glue helper. */
static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}
675 
/* Eight-block XTS encryption adapter: forwards to the wide asm routine. */
static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}
680 
/* Eight-block XTS decryption adapter: forwards to the wide asm routine. */
static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
685 
/* XTS encryption dispatch table for the glue helper: try the 8-block
 * routine first, fall back to single blocks for the tail.
 * NOTE(review): fpu_blocks_limit = 1 presumably lets the glue code
 * manage FPU sections at single-block granularity — confirm against
 * the glue_helper implementation. */
static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};
698 
/* XTS decryption dispatch table; mirror of aesni_enc_xts with the
 * decryption adapters. */
static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};
711 
/* 64-bit XTS encryption: hand the whole request to the common glue
 * walker with the aligned tweak/crypt key schedules. */
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
722 
/* 64-bit XTS decryption: same shape as xts_encrypt() with the
 * decryption dispatch table. */
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}
733 
734 #else
735 
736 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
737                        struct scatterlist *src, unsigned int nbytes)
738 {
739         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
740         be128 buf[8];
741         struct xts_crypt_req req = {
742                 .tbuf = buf,
743                 .tbuflen = sizeof(buf),
744 
745                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
746                 .tweak_fn = aesni_xts_tweak,
747                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
748                 .crypt_fn = lrw_xts_encrypt_callback,
749         };
750         int ret;
751 
752         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
753 
754         kernel_fpu_begin();
755         ret = xts_crypt(desc, dst, src, nbytes, &req);
756         kernel_fpu_end();
757 
758         return ret;
759 }
760 
761 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
762                        struct scatterlist *src, unsigned int nbytes)
763 {
764         struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
765         be128 buf[8];
766         struct xts_crypt_req req = {
767                 .tbuf = buf,
768                 .tbuflen = sizeof(buf),
769 
770                 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
771                 .tweak_fn = aesni_xts_tweak,
772                 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
773                 .crypt_fn = lrw_xts_decrypt_callback,
774         };
775         int ret;
776 
777         desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
778 
779         kernel_fpu_begin();
780         ret = xts_crypt(desc, dst, src, nbytes, &req);
781         kernel_fpu_end();
782 
783         return ret;
784 }
785 
786 #endif
787 
788 #ifdef CONFIG_X86_64
789 static int rfc4106_init(struct crypto_aead *aead)
790 {
791         struct cryptd_aead *cryptd_tfm;
792         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
793 
794         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
795                                        CRYPTO_ALG_INTERNAL,
796                                        CRYPTO_ALG_INTERNAL);
797         if (IS_ERR(cryptd_tfm))
798                 return PTR_ERR(cryptd_tfm);
799 
800         *ctx = cryptd_tfm;
801         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
802         return 0;
803 }
804 
/* Release the cryptd transform allocated by rfc4106_init(). */
static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **tfm_ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*tfm_ctx);
}
811 
812 static void
813 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
814 {
815         struct aesni_gcm_set_hash_subkey_result *result = req->data;
816 
817         if (err == -EINPROGRESS)
818                 return;
819         result->err = err;
820         complete(&result->completion);
821 }
822 
823 static int
824 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
825 {
826         struct crypto_ablkcipher *ctr_tfm;
827         struct ablkcipher_request *req;
828         int ret = -EINVAL;
829         struct aesni_hash_subkey_req_data *req_data;
830 
831         ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
832         if (IS_ERR(ctr_tfm))
833                 return PTR_ERR(ctr_tfm);
834 
835         ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
836         if (ret)
837                 goto out_free_ablkcipher;
838 
839         ret = -ENOMEM;
840         req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
841         if (!req)
842                 goto out_free_ablkcipher;
843 
844         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
845         if (!req_data)
846                 goto out_free_request;
847 
848         memset(req_data->iv, 0, sizeof(req_data->iv));
849 
850         /* Clear the data in the hash sub key container to zero.*/
851         /* We want to cipher all zeros to create the hash sub key. */
852         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
853 
854         init_completion(&req_data->result.completion);
855         sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
856         ablkcipher_request_set_tfm(req, ctr_tfm);
857         ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
858                                         CRYPTO_TFM_REQ_MAY_BACKLOG,
859                                         rfc4106_set_hash_subkey_done,
860                                         &req_data->result);
861 
862         ablkcipher_request_set_crypt(req, &req_data->sg,
863                 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
864 
865         ret = crypto_ablkcipher_encrypt(req);
866         if (ret == -EINPROGRESS || ret == -EBUSY) {
867                 ret = wait_for_completion_interruptible
868                         (&req_data->result.completion);
869                 if (!ret)
870                         ret = req_data->result.err;
871         }
872         kfree(req_data);
873 out_free_request:
874         ablkcipher_request_free(req);
875 out_free_ablkcipher:
876         crypto_free_ablkcipher(ctr_tfm);
877         return ret;
878 }
879 
880 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
881                                   unsigned int key_len)
882 {
883         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
884 
885         if (key_len < 4) {
886                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
887                 return -EINVAL;
888         }
889         /*Account for 4 byte nonce at the end.*/
890         key_len -= 4;
891 
892         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
893 
894         return aes_set_key_common(crypto_aead_tfm(aead),
895                                   &ctx->aes_key_expanded, key, key_len) ?:
896                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
897 }
898 
899 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
900                            unsigned int key_len)
901 {
902         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
903         struct cryptd_aead *cryptd_tfm = *ctx;
904 
905         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
906 }
907 
/* RFC4106 permits authentication tag lengths of 8, 12 or 16 bytes. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	if (authsize != 8 && authsize != 12 && authsize != 16)
		return -EINVAL;

	return 0;
}
922 
923 /* This is the Integrity Check Value (aka the authentication tag length and can
924  * be 8, 12 or 16 bytes long. */
925 static int rfc4106_set_authsize(struct crypto_aead *parent,
926                                 unsigned int authsize)
927 {
928         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
929         struct cryptd_aead *cryptd_tfm = *ctx;
930 
931         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
932 }
933 
/*
 * Synchronous RFC4106 GCM encryption. kernel_fpu_begin() is taken
 * unconditionally, so this must only run when the FPU is usable; the
 * rfc4106_encrypt() wrapper guarantees that (directly or via cryptd).
 *
 * req->src holds assoc data followed by plaintext; the ciphertext plus
 * auth tag are written to req->dst after the assoc region.
 */
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length equal */
	/* to 16 or 20 bytes */
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Build the IV: 4-byte nonce | 8-byte per-request IV | BE32 counter=1 */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	/* Fast path: single-entry src and dst that fit within one page can
	 * be mapped and processed in place; otherwise linearize everything
	 * into a temporary bounce buffer. */
	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	/* assoclen - 8: the asm routine takes the AAD without the trailing
	 * 8-byte IV portion — NOTE(review): confirm against the asm ABI. */
	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  dst + req->cryptlen, auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		/* Bounce-buffer path: copy ciphertext + tag back to dst. */
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}
1010 
/*
 * Synchronous RFC4106 GCM decryption and tag verification. Must only
 * run with the FPU usable (kernel_fpu_begin() is taken unconditionally);
 * the rfc4106_decrypt() wrapper guarantees that. Returns 0 on success
 * or -EBADMSG when the authentication tag does not match.
 *
 * NOTE(review): assumes req->cryptlen >= auth_tag_len, otherwise
 * tempCipherLen underflows — confirm the AEAD layer guarantees this.
 */
static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	/* Assuming we are supporting rfc4106 64-bit extended */
	/* sequence numbers We need to have the AAD length */
	/* equal to 16 or 20 bytes */

	/* Ciphertext length excluding the trailing authentication tag. */
	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV: 4-byte nonce | 8-byte per-request IV | BE32 counter=1 */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	/* Fast path: single-entry src and dst that fit within one page can
	 * be mapped and processed in place; otherwise linearize everything
	 * into a temporary bounce buffer. */
	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, req->assoclen - 8,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	/* crypto_memneq() is constant-time, avoiding a timing side channel. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		/* Bounce-buffer path: copy plaintext (without tag) to dst. */
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}
1094 
1095 static int rfc4106_encrypt(struct aead_request *req)
1096 {
1097         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1098         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1099         struct cryptd_aead *cryptd_tfm = *ctx;
1100 
1101         aead_request_set_tfm(req, irq_fpu_usable() ?
1102                                   cryptd_aead_child(cryptd_tfm) :
1103                                   &cryptd_tfm->base);
1104 
1105         return crypto_aead_encrypt(req);
1106 }
1107 
1108 static int rfc4106_decrypt(struct aead_request *req)
1109 {
1110         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1111         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1112         struct cryptd_aead *cryptd_tfm = *ctx;
1113 
1114         aead_request_set_tfm(req, irq_fpu_usable() ?
1115                                   cryptd_aead_child(cryptd_tfm) :
1116                                   &cryptd_tfm->base);
1117 
1118         return crypto_aead_decrypt(req);
1119 }
1120 #endif
1121 
1122 static struct crypto_alg aesni_algs[] = { {
1123         .cra_name               = "aes",
1124         .cra_driver_name        = "aes-aesni",
1125         .cra_priority           = 300,
1126         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1127         .cra_blocksize          = AES_BLOCK_SIZE,
1128         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1129                                   AESNI_ALIGN - 1,
1130         .cra_alignmask          = 0,
1131         .cra_module             = THIS_MODULE,
1132         .cra_u  = {
1133                 .cipher = {
1134                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1135                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1136                         .cia_setkey             = aes_set_key,
1137                         .cia_encrypt            = aes_encrypt,
1138                         .cia_decrypt            = aes_decrypt
1139                 }
1140         }
1141 }, {
1142         .cra_name               = "__aes-aesni",
1143         .cra_driver_name        = "__driver-aes-aesni",
1144         .cra_priority           = 0,
1145         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1146         .cra_blocksize          = AES_BLOCK_SIZE,
1147         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1148                                   AESNI_ALIGN - 1,
1149         .cra_alignmask          = 0,
1150         .cra_module             = THIS_MODULE,
1151         .cra_u  = {
1152                 .cipher = {
1153                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1154                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1155                         .cia_setkey             = aes_set_key,
1156                         .cia_encrypt            = __aes_encrypt,
1157                         .cia_decrypt            = __aes_decrypt
1158                 }
1159         }
1160 }, {
1161         .cra_name               = "__ecb-aes-aesni",
1162         .cra_driver_name        = "__driver-ecb-aes-aesni",
1163         .cra_priority           = 0,
1164         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1165                                   CRYPTO_ALG_INTERNAL,
1166         .cra_blocksize          = AES_BLOCK_SIZE,
1167         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1168                                   AESNI_ALIGN - 1,
1169         .cra_alignmask          = 0,
1170         .cra_type               = &crypto_blkcipher_type,
1171         .cra_module             = THIS_MODULE,
1172         .cra_u = {
1173                 .blkcipher = {
1174                         .min_keysize    = AES_MIN_KEY_SIZE,
1175                         .max_keysize    = AES_MAX_KEY_SIZE,
1176                         .setkey         = aes_set_key,
1177                         .encrypt        = ecb_encrypt,
1178                         .decrypt        = ecb_decrypt,
1179                 },
1180         },
1181 }, {
1182         .cra_name               = "__cbc-aes-aesni",
1183         .cra_driver_name        = "__driver-cbc-aes-aesni",
1184         .cra_priority           = 0,
1185         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1186                                   CRYPTO_ALG_INTERNAL,
1187         .cra_blocksize          = AES_BLOCK_SIZE,
1188         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1189                                   AESNI_ALIGN - 1,
1190         .cra_alignmask          = 0,
1191         .cra_type               = &crypto_blkcipher_type,
1192         .cra_module             = THIS_MODULE,
1193         .cra_u = {
1194                 .blkcipher = {
1195                         .min_keysize    = AES_MIN_KEY_SIZE,
1196                         .max_keysize    = AES_MAX_KEY_SIZE,
1197                         .setkey         = aes_set_key,
1198                         .encrypt        = cbc_encrypt,
1199                         .decrypt        = cbc_decrypt,
1200                 },
1201         },
1202 }, {
1203         .cra_name               = "ecb(aes)",
1204         .cra_driver_name        = "ecb-aes-aesni",
1205         .cra_priority           = 400,
1206         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1207         .cra_blocksize          = AES_BLOCK_SIZE,
1208         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1209         .cra_alignmask          = 0,
1210         .cra_type               = &crypto_ablkcipher_type,
1211         .cra_module             = THIS_MODULE,
1212         .cra_init               = ablk_ecb_init,
1213         .cra_exit               = ablk_exit,
1214         .cra_u = {
1215                 .ablkcipher = {
1216                         .min_keysize    = AES_MIN_KEY_SIZE,
1217                         .max_keysize    = AES_MAX_KEY_SIZE,
1218                         .setkey         = ablk_set_key,
1219                         .encrypt        = ablk_encrypt,
1220                         .decrypt        = ablk_decrypt,
1221                 },
1222         },
1223 }, {
1224         .cra_name               = "cbc(aes)",
1225         .cra_driver_name        = "cbc-aes-aesni",
1226         .cra_priority           = 400,
1227         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1228         .cra_blocksize          = AES_BLOCK_SIZE,
1229         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1230         .cra_alignmask          = 0,
1231         .cra_type               = &crypto_ablkcipher_type,
1232         .cra_module             = THIS_MODULE,
1233         .cra_init               = ablk_cbc_init,
1234         .cra_exit               = ablk_exit,
1235         .cra_u = {
1236                 .ablkcipher = {
1237                         .min_keysize    = AES_MIN_KEY_SIZE,
1238                         .max_keysize    = AES_MAX_KEY_SIZE,
1239                         .ivsize         = AES_BLOCK_SIZE,
1240                         .setkey         = ablk_set_key,
1241                         .encrypt        = ablk_encrypt,
1242                         .decrypt        = ablk_decrypt,
1243                 },
1244         },
1245 #ifdef CONFIG_X86_64
1246 }, {
1247         .cra_name               = "__ctr-aes-aesni",
1248         .cra_driver_name        = "__driver-ctr-aes-aesni",
1249         .cra_priority           = 0,
1250         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1251                                   CRYPTO_ALG_INTERNAL,
1252         .cra_blocksize          = 1,
1253         .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
1254                                   AESNI_ALIGN - 1,
1255         .cra_alignmask          = 0,
1256         .cra_type               = &crypto_blkcipher_type,
1257         .cra_module             = THIS_MODULE,
1258         .cra_u = {
1259                 .blkcipher = {
1260                         .min_keysize    = AES_MIN_KEY_SIZE,
1261                         .max_keysize    = AES_MAX_KEY_SIZE,
1262                         .ivsize         = AES_BLOCK_SIZE,
1263                         .setkey         = aes_set_key,
1264                         .encrypt        = ctr_crypt,
1265                         .decrypt        = ctr_crypt,
1266                 },
1267         },
1268 }, {
1269         .cra_name               = "ctr(aes)",
1270         .cra_driver_name        = "ctr-aes-aesni",
1271         .cra_priority           = 400,
1272         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1273         .cra_blocksize          = 1,
1274         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1275         .cra_alignmask          = 0,
1276         .cra_type               = &crypto_ablkcipher_type,
1277         .cra_module             = THIS_MODULE,
1278         .cra_init               = ablk_ctr_init,
1279         .cra_exit               = ablk_exit,
1280         .cra_u = {
1281                 .ablkcipher = {
1282                         .min_keysize    = AES_MIN_KEY_SIZE,
1283                         .max_keysize    = AES_MAX_KEY_SIZE,
1284                         .ivsize         = AES_BLOCK_SIZE,
1285                         .setkey         = ablk_set_key,
1286                         .encrypt        = ablk_encrypt,
1287                         .decrypt        = ablk_encrypt,
1288                         .geniv          = "chainiv",
1289                 },
1290         },
1291 #endif
1292 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1293 }, {
1294         .cra_name               = "pcbc(aes)",
1295         .cra_driver_name        = "pcbc-aes-aesni",
1296         .cra_priority           = 400,
1297         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1298         .cra_blocksize          = AES_BLOCK_SIZE,
1299         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1300         .cra_alignmask          = 0,
1301         .cra_type               = &crypto_ablkcipher_type,
1302         .cra_module             = THIS_MODULE,
1303         .cra_init               = ablk_pcbc_init,
1304         .cra_exit               = ablk_exit,
1305         .cra_u = {
1306                 .ablkcipher = {
1307                         .min_keysize    = AES_MIN_KEY_SIZE,
1308                         .max_keysize    = AES_MAX_KEY_SIZE,
1309                         .ivsize         = AES_BLOCK_SIZE,
1310                         .setkey         = ablk_set_key,
1311                         .encrypt        = ablk_encrypt,
1312                         .decrypt        = ablk_decrypt,
1313                 },
1314         },
1315 #endif
1316 }, {
1317         .cra_name               = "__lrw-aes-aesni",
1318         .cra_driver_name        = "__driver-lrw-aes-aesni",
1319         .cra_priority           = 0,
1320         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1321                                   CRYPTO_ALG_INTERNAL,
1322         .cra_blocksize          = AES_BLOCK_SIZE,
1323         .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
1324         .cra_alignmask          = 0,
1325         .cra_type               = &crypto_blkcipher_type,
1326         .cra_module             = THIS_MODULE,
1327         .cra_exit               = lrw_aesni_exit_tfm,
1328         .cra_u = {
1329                 .blkcipher = {
1330                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1331                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1332                         .ivsize         = AES_BLOCK_SIZE,
1333                         .setkey         = lrw_aesni_setkey,
1334                         .encrypt        = lrw_encrypt,
1335                         .decrypt        = lrw_decrypt,
1336                 },
1337         },
1338 }, {
1339         .cra_name               = "__xts-aes-aesni",
1340         .cra_driver_name        = "__driver-xts-aes-aesni",
1341         .cra_priority           = 0,
1342         .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
1343                                   CRYPTO_ALG_INTERNAL,
1344         .cra_blocksize          = AES_BLOCK_SIZE,
1345         .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
1346         .cra_alignmask          = 0,
1347         .cra_type               = &crypto_blkcipher_type,
1348         .cra_module             = THIS_MODULE,
1349         .cra_u = {
1350                 .blkcipher = {
1351                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1352                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1353                         .ivsize         = AES_BLOCK_SIZE,
1354                         .setkey         = xts_aesni_setkey,
1355                         .encrypt        = xts_encrypt,
1356                         .decrypt        = xts_decrypt,
1357                 },
1358         },
1359 }, {
1360         .cra_name               = "lrw(aes)",
1361         .cra_driver_name        = "lrw-aes-aesni",
1362         .cra_priority           = 400,
1363         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1364         .cra_blocksize          = AES_BLOCK_SIZE,
1365         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1366         .cra_alignmask          = 0,
1367         .cra_type               = &crypto_ablkcipher_type,
1368         .cra_module             = THIS_MODULE,
1369         .cra_init               = ablk_init,
1370         .cra_exit               = ablk_exit,
1371         .cra_u = {
1372                 .ablkcipher = {
1373                         .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1374                         .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1375                         .ivsize         = AES_BLOCK_SIZE,
1376                         .setkey         = ablk_set_key,
1377                         .encrypt        = ablk_encrypt,
1378                         .decrypt        = ablk_decrypt,
1379                 },
1380         },
1381 }, {
1382         .cra_name               = "xts(aes)",
1383         .cra_driver_name        = "xts-aes-aesni",
1384         .cra_priority           = 400,
1385         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1386         .cra_blocksize          = AES_BLOCK_SIZE,
1387         .cra_ctxsize            = sizeof(struct async_helper_ctx),
1388         .cra_alignmask          = 0,
1389         .cra_type               = &crypto_ablkcipher_type,
1390         .cra_module             = THIS_MODULE,
1391         .cra_init               = ablk_init,
1392         .cra_exit               = ablk_exit,
1393         .cra_u = {
1394                 .ablkcipher = {
1395                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1396                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1397                         .ivsize         = AES_BLOCK_SIZE,
1398                         .setkey         = ablk_set_key,
1399                         .encrypt        = ablk_encrypt,
1400                         .decrypt        = ablk_decrypt,
1401                 },
1402         },
1403 } };
1404 
#ifdef CONFIG_X86_64
/*
 * AEAD algorithms (64-bit only — the GCM asm routines are only wired
 * up under CONFIG_X86_64 in this file):
 *  - "__gcm-aes-aesni": internal synchronous helper (CRYPTO_ALG_INTERNAL),
 *    must only run with the FPU usable.
 *  - "rfc4106(gcm(aes))": public async wrapper that defers to cryptd
 *    when the FPU is not available.
 */
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
/* Empty table so the register/unregister calls stay unconditional. */
static struct aead_alg aesni_aead_algs[0];
#endif
1444 
1445 
/* Bind/autoload this module only on CPUs advertising AES-NI. */
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1451 
1452 static int __init aesni_init(void)
1453 {
1454         int err;
1455 
1456         if (!x86_match_cpu(aesni_cpu_id))
1457                 return -ENODEV;
1458 #ifdef CONFIG_X86_64
1459 #ifdef CONFIG_AS_AVX2
1460         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1461                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1462                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1463                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1464         } else
1465 #endif
1466 #ifdef CONFIG_AS_AVX
1467         if (boot_cpu_has(X86_FEATURE_AVX)) {
1468                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1469                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1470                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1471         } else
1472 #endif
1473         {
1474                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1475                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1476                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1477         }
1478         aesni_ctr_enc_tfm = aesni_ctr_enc;
1479 #ifdef CONFIG_AS_AVX
1480         if (cpu_has_avx) {
1481                 /* optimize performance of ctr mode encryption transform */
1482                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1483                 pr_info("AES CTR mode by8 optimization enabled\n");
1484         }
1485 #endif
1486 #endif
1487 
1488         err = crypto_fpu_init();
1489         if (err)
1490                 return err;
1491 
1492         err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1493         if (err)
1494                 goto fpu_exit;
1495 
1496         err = crypto_register_aeads(aesni_aead_algs,
1497                                     ARRAY_SIZE(aesni_aead_algs));
1498         if (err)
1499                 goto unregister_algs;
1500 
1501         return err;
1502 
1503 unregister_algs:
1504         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1505 fpu_exit:
1506         crypto_fpu_exit();
1507         return err;
1508 }
1509 
/* Module exit: tear down in reverse order of aesni_init(). */
static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}
1517 
/* late_initcall rather than module_init — presumably to ensure the
 * crypto infrastructure this module registers against is up first;
 * NOTE(review): confirm the intended ordering constraint. */
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
1524 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp