Linux/include/linux/crypto.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/completion.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)       \
                __MODULE_INFO(alias, alias_userspace, name);    \
                __MODULE_INFO(alias, alias_crypto, "crypto-" name)
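
/*
 * Example (editorial sketch, not part of the original header): a module
 * implementing the "aes" algorithm would declare
 *
 *        MODULE_ALIAS_CRYPTO("aes");
 *
 * which creates both the bare "aes" alias, needed for userspace-initiated
 * loading, and the "crypto-aes" alias used for restricted in-kernel
 * autoloading.
 */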

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK            0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER          0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS        0x00000002
#define CRYPTO_ALG_TYPE_AEAD            0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER       0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER      0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER        0x00000005
#define CRYPTO_ALG_TYPE_KPP             0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS       0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS       0x0000000b
#define CRYPTO_ALG_TYPE_RNG             0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER        0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST          0x0000000e
#define CRYPTO_ALG_TYPE_HASH            0x0000000e
#define CRYPTO_ALG_TYPE_SHASH           0x0000000e
#define CRYPTO_ALG_TYPE_AHASH           0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK       0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK      0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK  0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK  0x0000000e

#define CRYPTO_ALG_LARVAL               0x00000010
#define CRYPTO_ALG_DEAD                 0x00000020
#define CRYPTO_ALG_DYING                0x00000040
#define CRYPTO_ALG_ASYNC                0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK        0x00000100

/*
 * Set if the algorithm has passed automated run-time testing.  Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED               0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE             0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via instruction set or so.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY     0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL             0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY         0x00004000

/*
 * Don't trigger module loading
 */
#define CRYPTO_NOLOAD                   0x00008000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY             0x00000001

#define CRYPTO_TFM_REQ_MASK             0x000fff00
#define CRYPTO_TFM_RES_MASK             0xfff00000

#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP        0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG      0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY         0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN      0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED    0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN    0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS        0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME             128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types.  In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
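
/*
 * Example (editorial sketch, hypothetical names): because the context area of
 * a crypto_tfm (__crt_ctx further below) is declared with
 * CRYPTO_MINALIGN_ATTR, a driver context containing 64-bit members is safe to
 * place there:
 *
 *        struct my_cipher_ctx {
 *                u64 counter;        (needs 64-bit alignment)
 *                u8 key[32];
 *        };
 *
 * crypto_tfm_ctx() then returns memory aligned to at least CRYPTO_MINALIGN,
 * so casting it to struct my_cipher_ctx * causes no alignment faults.
 */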

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
        struct list_head list;
        crypto_completion_t complete;
        void *data;
        struct crypto_tfm *tfm;

        u32 flags;
};

struct ablkcipher_request {
        struct crypto_async_request base;

        unsigned int nbytes;

        void *info;

        struct scatterlist *src;
        struct scatterlist *dst;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
        struct crypto_blkcipher *tfm;
        void *info;
        u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *               smallest key length supported by this transformation algorithm.
 *               This must be set to one of the pre-defined values as this is
 *               not hardware specific. Possible values for this field can be
 *               found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *               largest key length supported by this transformation algorithm.
 *               This must be set to one of the pre-defined values as this is
 *               not hardware specific. Possible values for this field can be
 *               found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *          program a supplied key into the hardware or store the key in the
 *          transformation context for programming it later. Note that this
 *          function does modify the transformation context. This function can
 *          be called multiple times during the existence of the transformation
 *          object, so one must make sure the key is properly reprogrammed into
 *          the hardware. This function is also responsible for checking the key
 *          length for validity. In case a software fallback was put in place in
 *          the @cra_init call, this function might need to use the fallback if
 *          the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *           the supplied scatterlist containing the blocks of data. The crypto
 *           API consumer is responsible for aligning the entries of the
 *           scatterlist properly and making sure the chunks are correctly
 *           sized. In case a software fallback was put in place in the
 *           @cra_init call, this function might need to use the fallback if
 *           the algorithm doesn't support all of the key sizes. In case the
 *           key was stored in transformation context, the key might need to be
 *           re-programmed into the hardware in this function. This function
 *           shall not modify the transformation context, as this function may
 *           be called in parallel with the same transformation object.
 * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to
 *           @encrypt and the conditions are exactly the same.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *          IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @ivsize are mandatory and must be filled.
 */
struct ablkcipher_alg {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *                   the smallest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MIN_KEY_SIZE"
 *                   include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *                   the largest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MAX_KEY_SIZE"
 *                   include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *              program a supplied key into the hardware or store the key in the
 *              transformation context for programming it later. Note that this
 *              function does modify the transformation context. This function
 *              can be called multiple times during the existence of the
 *              transformation object, so one must make sure the key is properly
 *              reprogrammed into the hardware. This function is also
 *              responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *               single block of data, which must be @cra_blocksize big. This
 *               always operates on a full @cra_blocksize and it is not possible
 *               to encrypt a block of smaller size. The supplied buffers must
 *               therefore also be at least of @cra_blocksize size. Both the
 *               input and output buffers are always aligned to @cra_alignmask.
 *               In case either of the input or output buffer supplied by user
 *               of the crypto API is not aligned to @cra_alignmask, the crypto
 *               API will re-align the buffers. The re-alignment means that a
 *               new buffer will be allocated, the data will be copied into the
 *               new buffer, then the processing will happen on the new buffer,
 *               then the data will be copied back into the original buffer and
 *               finally the new buffer will be freed. In case a software
 *               fallback was put in place in the @cra_init call, this function
 *               might need to use the fallback if the algorithm doesn't support
 *               all of the key sizes. In case the key was stored in
 *               transformation context, the key might need to be re-programmed
 *               into the hardware in this function. This function shall not
 *               modify the transformation context, as this function may be
 *               called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *               @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
        unsigned int cia_min_keysize;
        unsigned int cia_max_keysize;
        int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
                          unsigned int keylen);
        void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
        int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen);
        int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
                              unsigned int slen, u8 *dst, unsigned int *dlen);
};

#ifdef CONFIG_CRYPTO_STATS
/*
 * struct crypto_istat_aead - statistics for AEAD algorithm
 * @encrypt_cnt:        number of encrypt requests
 * @encrypt_tlen:       total data size handled by encrypt requests
 * @decrypt_cnt:        number of decrypt requests
 * @decrypt_tlen:       total data size handled by decrypt requests
 * @err_cnt:            number of errors for AEAD requests
 */
struct crypto_istat_aead {
        atomic64_t encrypt_cnt;
        atomic64_t encrypt_tlen;
        atomic64_t decrypt_cnt;
        atomic64_t decrypt_tlen;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_akcipher - statistics for akcipher algorithm
 * @encrypt_cnt:        number of encrypt requests
 * @encrypt_tlen:       total data size handled by encrypt requests
 * @decrypt_cnt:        number of decrypt requests
 * @decrypt_tlen:       total data size handled by decrypt requests
 * @verify_cnt:         number of verify operations
 * @sign_cnt:           number of sign requests
 * @err_cnt:            number of errors for akcipher requests
 */
struct crypto_istat_akcipher {
        atomic64_t encrypt_cnt;
        atomic64_t encrypt_tlen;
        atomic64_t decrypt_cnt;
        atomic64_t decrypt_tlen;
        atomic64_t verify_cnt;
        atomic64_t sign_cnt;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_cipher - statistics for cipher algorithm
 * @encrypt_cnt:        number of encrypt requests
 * @encrypt_tlen:       total data size handled by encrypt requests
 * @decrypt_cnt:        number of decrypt requests
 * @decrypt_tlen:       total data size handled by decrypt requests
 * @err_cnt:            number of errors for cipher requests
 */
struct crypto_istat_cipher {
        atomic64_t encrypt_cnt;
        atomic64_t encrypt_tlen;
        atomic64_t decrypt_cnt;
        atomic64_t decrypt_tlen;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt:       number of compress requests
 * @compress_tlen:      total data size handled by compress requests
 * @decompress_cnt:     number of decompress requests
 * @decompress_tlen:    total data size handled by decompress requests
 * @err_cnt:            number of errors for compress requests
 */
struct crypto_istat_compress {
        atomic64_t compress_cnt;
        atomic64_t compress_tlen;
        atomic64_t decompress_cnt;
        atomic64_t decompress_tlen;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_hash - statistics for hash algorithm
 * @hash_cnt:           number of hash requests
 * @hash_tlen:          total data size hashed
 * @err_cnt:            number of errors for hash requests
 */
struct crypto_istat_hash {
        atomic64_t hash_cnt;
        atomic64_t hash_tlen;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_kpp - statistics for KPP algorithm
 * @setsecret_cnt:              number of setsecret operations
 * @generate_public_key_cnt:    number of generate_public_key operations
 * @compute_shared_secret_cnt:  number of compute_shared_secret operations
 * @err_cnt:                    number of errors for KPP requests
 */
struct crypto_istat_kpp {
        atomic64_t setsecret_cnt;
        atomic64_t generate_public_key_cnt;
        atomic64_t compute_shared_secret_cnt;
        atomic64_t err_cnt;
};

/*
 * struct crypto_istat_rng - statistics for RNG algorithm
 * @generate_cnt:       number of RNG generate requests
 * @generate_tlen:      total size of data generated by the RNG
 * @seed_cnt:           number of times the RNG was seeded
 * @err_cnt:            number of errors for RNG requests
 */
struct crypto_istat_rng {
        atomic64_t generate_cnt;
        atomic64_t generate_tlen;
        atomic64_t seed_cnt;
        atomic64_t err_cnt;
};
#endif /* CONFIG_CRYPTO_STATS */

#define cra_ablkcipher  cra_u.ablkcipher
#define cra_blkcipher   cra_u.blkcipher
#define cra_cipher      cra_u.cipher
#define cra_compress    cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *             CRYPTO_ALG_* flags for the flags which go in here. Those are
 *             used for fine-tuning the description of the transformation
 *             algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *                 of the smallest possible unit which can be transformed with
 *                 this algorithm. The users must respect this value.
 *                 In case of a HASH transformation, it is possible for a
 *                 smaller block than @cra_blocksize to be passed to the crypto
 *                 API for transformation; in case of any other transformation
 *                 type, an error will be returned upon any attempt to transform
 *                 smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *               value informs the kernel crypto API about the memory size
 *               needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *                 buffer containing the input data for the algorithm must be
 *                 aligned to this alignment mask. The data buffer for the
 *                 output data must be aligned to this alignment mask. Note that
 *                 the Crypto API will do the re-alignment in software, but
 *                 only under special conditions and there is a performance hit.
 *                 The re-alignment happens at these occasions for different
 *                 @cra_u types: cipher -- For both input data and output data
 *                 buffer; ahash -- For output hash destination buf; shash --
 *                 For output hash destination buf.
 *                 This is needed on hardware which is flawed by design and
 *                 cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *                multiple transformations with the same @cra_name are available
 *                to the Crypto API, the kernel will use the one with the
 *                highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *            transformation algorithm. This is the name of the transformation
 *            itself. This field is used by the kernel when looking up the
 *            providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *                   name of the provider of the transformation. This can be any
 *                   arbitrary value, but in the usual case, this contains the
 *                   name of the chip or provider and the name of the
 *                   transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *            struct crypto_type, which implements callbacks common for all
 *            transformation types. There are multiple options:
 *            &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *            &crypto_ahash_type, &crypto_rng_type.
 *            This field might be empty. In that case, there are no common
 *            callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *         multiple structures. Depending on the type of transformation selected
 *         by @cra_type and @cra_flags above, the associated structure must be
 *         filled with callbacks. This field might be empty. This is the case
 *         for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *            is used to initialize the cryptographic transformation object.
 *            This function is called only once at the instantiation time, right
 *            after the transformation context was allocated. In case the
 *            cryptographic hardware has some special requirements which need to
 *            be handled by software, this function shall check for the precise
 *            requirement of the transformation and put any software fallbacks
 *            in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *            counterpart to @cra_init, used to remove various changes set in
 *            @cra_init.
 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
 *                    definition. See struct ablkcipher_alg.
 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
 *                   definition. See struct blkcipher_alg.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *                definition. See struct cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *                  See struct compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE.
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * @stats: union of all possible crypto_istat_xxx structures
 * @stats.aead:         statistics for AEAD algorithm
 * @stats.akcipher:     statistics for akcipher algorithm
 * @stats.cipher:       statistics for cipher algorithm
 * @stats.compress:     statistics for compress algorithm
 * @stats.hash:         statistics for hash algorithm
 * @stats.rng:          statistics for rng algorithm
 * @stats.kpp:          statistics for KPP algorithm
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
        struct list_head cra_list;
        struct list_head cra_users;

        u32 cra_flags;
        unsigned int cra_blocksize;
        unsigned int cra_ctxsize;
        unsigned int cra_alignmask;

        int cra_priority;
        refcount_t cra_refcnt;

        char cra_name[CRYPTO_MAX_ALG_NAME];
        char cra_driver_name[CRYPTO_MAX_ALG_NAME];

        const struct crypto_type *cra_type;

        union {
                struct ablkcipher_alg ablkcipher;
                struct blkcipher_alg blkcipher;
                struct cipher_alg cipher;
                struct compress_alg compress;
        } cra_u;

        int (*cra_init)(struct crypto_tfm *tfm);
        void (*cra_exit)(struct crypto_tfm *tfm);
        void (*cra_destroy)(struct crypto_alg *alg);

        struct module *cra_module;

#ifdef CONFIG_CRYPTO_STATS
        union {
                struct crypto_istat_aead aead;
                struct crypto_istat_akcipher akcipher;
                struct crypto_istat_cipher cipher;
                struct crypto_istat_compress compress;
                struct crypto_istat_hash hash;
                struct crypto_istat_rng rng;
                struct crypto_istat_kpp kpp;
        } stats;
#endif /* CONFIG_CRYPTO_STATS */

} CRYPTO_MINALIGN_ATTR;

#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
#else
static inline void crypto_stats_init(struct crypto_alg *alg)
{}
static inline void crypto_stats_get(struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
{}
static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
#endif
/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
        struct completion completion;
        int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
        struct crypto_wait _wait = { \
                COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
                wait_for_completion(&wait->completion);
                reinit_completion(&wait->completion);
                err = wait->err;
                break;
        }

        return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
        init_completion(&wait->completion);
}
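
/*
 * Example (editorial sketch): the canonical pattern for driving an
 * asynchronous request to completion synchronously with the helpers above.
 * Here "req" stands for a previously prepared request whose completion
 * callback is pointed at crypto_req_done():
 *
 *        DECLARE_CRYPTO_WAIT(wait);
 *        int err;
 *
 *        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *                                        CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                        crypto_req_done, &wait);
 *        err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *
 * crypto_wait_req() absorbs -EINPROGRESS and -EBUSY by sleeping on the
 * completion and returns the final status reported via crypto_req_done().
 */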

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
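
/*
 * Example (editorial sketch, all names hypothetical): registering a
 * single-block cipher implementation from a module:
 *
 *        static struct crypto_alg my_alg = {
 *                .cra_name        = "mycipher",
 *                .cra_driver_name = "mycipher-generic",
 *                .cra_priority    = 100,
 *                .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
 *                .cra_blocksize   = 16,
 *                .cra_ctxsize     = sizeof(struct my_cipher_ctx),
 *                .cra_module      = THIS_MODULE,
 *                .cra_u.cipher    = {
 *                        .cia_min_keysize = 16,
 *                        .cia_max_keysize = 32,
 *                        .cia_setkey      = my_setkey,
 *                        .cia_encrypt     = my_encrypt,
 *                        .cia_decrypt     = my_decrypt,
 *                },
 *        };
 *
 * The module's init and exit routines would then call
 * crypto_register_alg(&my_alg) and crypto_unregister_alg(&my_alg).
 */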

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic.  Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);

        struct crypto_ablkcipher *base;

        unsigned int ivsize;
        unsigned int reqsize;
};

struct blkcipher_tfm {
        void *iv;
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
        int (*cit_setkey)(struct crypto_tfm *tfm,
                          const u8 *key, unsigned int keylen);
        void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
        int (*cot_compress)(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen);
        int (*cot_decompress)(struct crypto_tfm *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher  crt_u.ablkcipher
#define crt_blkcipher   crt_u.blkcipher
#define crt_cipher      crt_u.cipher
#define crt_compress    crt_u.compress

struct crypto_tfm {

        u32 crt_flags;

        union {
                struct ablkcipher_tfm ablkcipher;
                struct blkcipher_tfm blkcipher;
                struct cipher_tfm cipher;
                struct compress_tfm compress;
        } crt_u;

        void (*exit)(struct crypto_tfm *tfm);

        struct crypto_alg *__crt_alg;

        void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
        struct crypto_tfm base;
};

struct crypto_blkcipher {
        struct crypto_tfm base;
};

struct crypto_cipher {
        struct crypto_tfm base;
};

struct crypto_comp {
        struct crypto_tfm base;
};

enum {
        CRYPTOA_UNSPEC,
        CRYPTOA_ALG,
        CRYPTOA_TYPE,
        CRYPTOA_U32,
        __CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
        u32 type;
        u32 mask;
};

struct crypto_attr_u32 {
        u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
        return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
        return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
        return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
        struct crypto_tfm *tfm;
        return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
        struct crypto_tfm *tfm)
{
        return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
        return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * The asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately, before the operation has completed.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */
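
/*
 * Example (editorial sketch): one synchronous-style encryption through the
 * asynchronous API, combined with the crypto_wait helpers defined earlier.
 * The tfm is assumed to have been obtained during setup; sg_src, sg_dst, iv,
 * key and the error handling are the caller's (omitted here):
 *
 *        DECLARE_CRYPTO_WAIT(wait);
 *        struct ablkcipher_request *req;
 *        int err;
 *
 *        err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                        crypto_req_done, &wait);
 *        ablkcipher_request_set_crypt(req, sg_src, sg_dst, nbytes, iv);
 *        err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *        ablkcipher_request_free(req);
 */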

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
        struct crypto_ablkcipher *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
        crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *         otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_has_alg(alg_name, crypto_skcipher_type(type),
                              crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
        struct crypto_ablkcipher *tfm)
{
        return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
                                               u32 flags)
{
        crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
                                                 u32 flags)
{
        crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note that the key length determines the cipher variant. Many block ciphers
 * come in different variants depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16-byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

        return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle registered in an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
        struct ablkcipher_request *req)
{
        return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *       needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        struct crypto_alg *alg = crt->base->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crt->encrypt(req);
        crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
        return ret;
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *       needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        struct crypto_alg *alg = crt->base->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crt->decrypt(req);
        crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
        return ret;
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
        struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
        req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
        struct crypto_async_request *req)
{
        return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
        struct crypto_ablkcipher *tfm, gfp_t gfp)
{
        struct ablkcipher_request *req;

        req = kmalloc(sizeof(struct ablkcipher_request) +
                      crypto_ablkcipher_reqsize(tfm), gfp);

        if (likely(req))
                ablkcipher_request_set_tfm(req, tfm);

        return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
        kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may backlog and
 *         increase the wait queue beyond the initial maximum size;
 *         CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *        crypto API, but provided to the callback function for it to use. Here,
 *        the caller can provide a reference to memory the callback function can
 *        operate on. As the callback function is invoked asynchronously to the
 *        related functionality, it may need to access data structures of the
 *        related functionality which can be referenced using this pointer. The
 *        callback function can access the memory via the "data" field in the
 *        crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *      void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
        struct ablkcipher_request *req,
        u32 flags, crypto_completion_t compl, void *data)
{
        req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *      by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
        struct ablkcipher_request *req,
        struct scatterlist *src, struct scatterlist *dst,
        unsigned int nbytes, void *iv)
{
        req->src = src;
        req->dst = dst;
        req->nbytes = nbytes;
        req->info = iv;
}

/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto).
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. It is therefore the only state
 * info that can be kept for synchronous calls without using a big lock across
 * a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * cipher text. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */
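
/*
 * Example (editorial sketch): allocating and using a synchronous block
 * cipher. crypto_blkcipher_setkey(), crypto_blkcipher_set_iv() and
 * crypto_blkcipher_encrypt() are assumed from the remainder of this header;
 * error handling is omitted:
 *
 *        struct crypto_blkcipher *tfm;
 *        struct blkcipher_desc desc;
 *
 *        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *        if (IS_ERR(tfm))
 *                return PTR_ERR(tfm);
 *
 *        crypto_blkcipher_setkey(tfm, key, keylen);
 *        crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *        desc.tfm = tfm;
 *        desc.flags = 0;
 *        crypto_blkcipher_encrypt(&desc, sg_dst, sg_src, nbytes);
 *        crypto_free_blkcipher(tfm);
 */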
1285 
1286 static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1287         struct crypto_tfm *tfm)
1288 {
1289         return (struct crypto_blkcipher *)tfm;
1290 }
1291 
1292 static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1293         struct crypto_tfm *tfm)
1294 {
1295         BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1296         return __crypto_blkcipher_cast(tfm);
1297 }
1298 
1299 /**
1300  * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1301  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1302  *            block cipher
1303  * @type: specifies the type of the cipher
1304  * @mask: specifies the mask for the cipher
1305  *
1306  * Allocate a cipher handle for a block cipher. The returned struct
1307  * crypto_blkcipher is the cipher handle that is required for any subsequent
1308  * API invocation for that block cipher.
1309  *
1310  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1311  *         of an error, PTR_ERR() returns the error code.
1312  */
1313 static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1314         const char *alg_name, u32 type, u32 mask)
1315 {
1316         type &= ~CRYPTO_ALG_TYPE_MASK;
1317         type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1318         mask |= CRYPTO_ALG_TYPE_MASK;
1319 
1320         return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1321 }
1322 
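/*
 * Illustrative sketch (not in the original header): the returned handle
 * is an ERR_PTR() on failure, so it must be checked with IS_ERR() rather
 * than compared against NULL.
 */
static struct crypto_blkcipher *example_get_cbc_aes(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		pr_err("cbc(aes) unavailable: %ld\n", PTR_ERR(tfm));
	return tfm;	/* caller still checks IS_ERR() before use */
}
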
1323 static inline struct crypto_tfm *crypto_blkcipher_tfm(
1324         struct crypto_blkcipher *tfm)
1325 {
1326         return &tfm->base;
1327 }
1328 
1329 /**
1330  * crypto_free_blkcipher() - zeroize and free the block cipher handle
1331  * @tfm: cipher handle to be freed
1332  */
1333 static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1334 {
1335         crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1336 }
1337 
1338 /**
1339  * crypto_has_blkcipher() - Search for the availability of a block cipher
1340  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1341  *            block cipher
1342  * @type: specifies the type of the cipher
1343  * @mask: specifies the mask for the cipher
1344  *
1345  * Return: true when the block cipher is known to the kernel crypto API; false
1346  *         otherwise
1347  */
1348 static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1349 {
1350         type &= ~CRYPTO_ALG_TYPE_MASK;
1351         type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1352         mask |= CRYPTO_ALG_TYPE_MASK;
1353 
1354         return crypto_has_alg(alg_name, type, mask);
1355 }
1356 
1357 /**
1358  * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1359  * @tfm: cipher handle
1360  *
1361  * Return: The character string holding the name of the cipher
1362  */
1363 static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1364 {
1365         return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1366 }
1367 
1368 static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1369         struct crypto_blkcipher *tfm)
1370 {
1371         return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1372 }
1373 
1374 static inline struct blkcipher_alg *crypto_blkcipher_alg(
1375         struct crypto_blkcipher *tfm)
1376 {
1377         return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1378 }
1379 
1380 /**
1381  * crypto_blkcipher_ivsize() - obtain IV size
1382  * @tfm: cipher handle
1383  *
1384  * The size of the IV for the block cipher referenced by the cipher handle is
1385  * returned. This IV size may be zero if the cipher does not need an IV.
1386  *
1387  * Return: IV size in bytes
1388  */
1389 static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1390 {
1391         return crypto_blkcipher_alg(tfm)->ivsize;
1392 }
1393 
1394 /**
1395  * crypto_blkcipher_blocksize() - obtain block size of cipher
1396  * @tfm: cipher handle
1397  *
1398  * The block size for the block cipher referenced with the cipher handle is
1399  * returned. The caller may use that information to allocate appropriate
1400  * memory for the data returned by the encryption or decryption operation.
1401  *
1402  * Return: block size of cipher
1403  */
1404 static inline unsigned int crypto_blkcipher_blocksize(
1405         struct crypto_blkcipher *tfm)
1406 {
1407         return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1408 }
1409 
1410 static inline unsigned int crypto_blkcipher_alignmask(
1411         struct crypto_blkcipher *tfm)
1412 {
1413         return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
1414 }
1415 
1416 static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
1417 {
1418         return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
1419 }
1420 
1421 static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
1422                                               u32 flags)
1423 {
1424         crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
1425 }
1426 
1427 static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
1428                                                 u32 flags)
1429 {
1430         crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
1431 }
1432 
1433 /**
1434  * crypto_blkcipher_setkey() - set key for cipher
1435  * @tfm: cipher handle
1436  * @key: buffer holding the key
1437  * @keylen: length of the key in bytes
1438  *
1439  * The caller provided key is set for the block cipher referenced by the cipher
1440  * handle.
1441  *
1442  * Note that the key length determines the cipher variant. Many block ciphers
1443  * support different variants depending on the key size, such as AES-128 vs.
1444  * AES-192 vs. AES-256. When providing a 16-byte key for an AES cipher handle,
1445  * AES-128 is performed.
1446  *
1447  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1448  */
1449 static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
1450                                           const u8 *key, unsigned int keylen)
1451 {
1452         return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
1453                                                  key, keylen);
1454 }
1455 
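/*
 * Illustrative sketch (not in the original header): the key length alone
 * selects the variant, so the same tfm performs AES-256 simply because a
 * 32-byte key is installed.
 */
static int example_set_aes256_key(struct crypto_blkcipher *tfm,
				  const u8 key[32])
{
	return crypto_blkcipher_setkey(tfm, key, 32);	/* 32 bytes => AES-256 */
}
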
1456 /**
1457  * crypto_blkcipher_encrypt() - encrypt plaintext
1458  * @desc: reference to the block cipher handle with meta data
1459  * @dst: scatter/gather list that is filled by the cipher operation with the
1460  *      ciphertext
1461  * @src: scatter/gather list that holds the plaintext
1462  * @nbytes: number of bytes of the plaintext to encrypt.
1463  *
1464  * Encrypt plaintext data using the IV set by the caller with a preceding
1465  * call of crypto_blkcipher_set_iv.
1466  *
1467  * The blkcipher_desc data structure must be filled by the caller and can
1468  * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1469  * with the block cipher handle; desc.flags is filled with either
1470  * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1471  *
1472  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1473  */
1474 static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
1475                                            struct scatterlist *dst,
1476                                            struct scatterlist *src,
1477                                            unsigned int nbytes)
1478 {
1479         desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1480         return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1481 }
1482 
1483 /**
1484  * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1485  * @desc: reference to the block cipher handle with meta data
1486  * @dst: scatter/gather list that is filled by the cipher operation with the
1487  *      ciphertext
1488  * @src: scatter/gather list that holds the plaintext
1489  * @nbytes: number of bytes of the plaintext to encrypt.
1490  *
1491  * Encrypt plaintext data with the use of an IV that is solely used for this
1492  * cipher operation. Any previously set IV is not used.
1493  *
1494  * The blkcipher_desc data structure must be filled by the caller and can
1495  * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1496  * with the block cipher handle; desc.info is filled with the IV to be used for
1497  * the current operation; desc.flags is filled with either
1498  * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1499  *
1500  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1501  */
1502 static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
1503                                               struct scatterlist *dst,
1504                                               struct scatterlist *src,
1505                                               unsigned int nbytes)
1506 {
1507         return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1508 }
1509 
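/*
 * Illustrative sketch (not in the original header): with the _iv variant
 * the IV travels in desc.info for this one call, leaving any IV stored in
 * the tfm untouched.
 */
static int example_encrypt_with_iv(struct crypto_blkcipher *tfm,
				   struct scatterlist *dst,
				   struct scatterlist *src,
				   unsigned int nbytes, u8 *iv)
{
	struct blkcipher_desc desc = {
		.tfm	= tfm,
		.info	= iv,	/* crypto_blkcipher_ivsize(tfm) bytes */
		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
	};

	return crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
}
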
1510 /**
1511  * crypto_blkcipher_decrypt() - decrypt ciphertext
1512  * @desc: reference to the block cipher handle with meta data
1513  * @dst: scatter/gather list that is filled by the cipher operation with the
1514  *      plaintext
1515  * @src: scatter/gather list that holds the ciphertext
1516  * @nbytes: number of bytes of the ciphertext to decrypt.
1517  *
1518  * Decrypt ciphertext data using the IV set by the caller with a preceding
1519  * call of crypto_blkcipher_set_iv.
1520  *
1521  * The blkcipher_desc data structure must be filled by the caller as documented
1522  * for the crypto_blkcipher_encrypt call above.
1523  *
1524  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1525  *
1526  */
1527 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
1528                                            struct scatterlist *dst,
1529                                            struct scatterlist *src,
1530                                            unsigned int nbytes)
1531 {
1532         desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1533         return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1534 }
1535 
1536 /**
1537  * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1538  * @desc: reference to the block cipher handle with meta data
1539  * @dst: scatter/gather list that is filled by the cipher operation with the
1540  *      plaintext
1541  * @src: scatter/gather list that holds the ciphertext
1542  * @nbytes: number of bytes of the ciphertext to decrypt.
1543  *
1544  * Decrypt ciphertext data with the use of an IV that is solely used for this
1545  * cipher operation. Any previously set IV is not used.
1546  *
1547  * The blkcipher_desc data structure must be filled by the caller as documented
1548  * for the crypto_blkcipher_encrypt_iv call above.
1549  *
1550  * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1551  */
1552 static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1553                                               struct scatterlist *dst,
1554                                               struct scatterlist *src,
1555                                               unsigned int nbytes)
1556 {
1557         return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1558 }
1559 
1560 /**
1561  * crypto_blkcipher_set_iv() - set IV for cipher
1562  * @tfm: cipher handle
1563  * @src: buffer holding the IV
1564  * @len: length of the IV in bytes
1565  *
1566  * The caller provided IV is set for the block cipher referenced by the cipher
1567  * handle.
1568  */
1569 static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1570                                            const u8 *src, unsigned int len)
1571 {
1572         memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1573 }
1574 
1575 /**
1576  * crypto_blkcipher_get_iv() - obtain IV from cipher
1577  * @tfm: cipher handle
1578  * @dst: buffer filled with the IV
1579  * @len: length of the buffer dst
1580  *
1581  * The caller can obtain the IV set for the block cipher referenced by the
1582  * cipher handle and store it into the user-provided buffer. If the buffer
1583  * has insufficient space, the IV is truncated to fit the buffer.
1584  */
1585 static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1586                                            u8 *dst, unsigned int len)
1587 {
1588         memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1589 }
1590 
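/*
 * Illustrative sketch (not in the original header): the get/set pair can
 * checkpoint and restore the tfm-resident IV. The 16-byte buffer is an
 * assumption (AES block size); real code should size it from
 * crypto_blkcipher_ivsize().
 */
static void example_iv_roundtrip(struct crypto_blkcipher *tfm)
{
	u8 saved[16];
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);

	crypto_blkcipher_get_iv(tfm, saved, ivsize);
	/* ... operations that advance or clobber the IV ... */
	crypto_blkcipher_set_iv(tfm, saved, ivsize);
}
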
1591 /**
1592  * DOC: Single Block Cipher API
1593  *
1594  * The single block cipher API is used with the ciphers of type
1595  * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1596  *
1597  * Using the single block cipher API calls, operations with the basic cipher
1598  * primitive can be implemented. These cipher primitives exclude any block
1599  * chaining operations including IV handling.
1600  *
1601  * The purpose of this single block cipher API is to support the implementation
1602  * of templates or other concepts that only need to perform the cipher operation
1603  * on one block at a time. Templates invoke the underlying cipher primitive
1604  * block-wise and process either the input or the output data of these cipher
1605  * operations.
1606  */
1607 
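/*
 * Illustrative sketch (not in the original header): the single-block API
 * in one pass -- allocate the bare "aes" primitive, set a key, transform
 * exactly one 16-byte block. No chaining mode or IV exists at this level.
 */
static int example_cipher_one_block(const u8 *key, unsigned int keylen,
				    const u8 in[16], u8 out[16])
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, keylen);
	if (!ret)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return ret;
}
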
1608 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1609 {
1610         return (struct crypto_cipher *)tfm;
1611 }
1612 
1613 static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1614 {
1615         BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1616         return __crypto_cipher_cast(tfm);
1617 }
1618 
1619 /**
1620  * crypto_alloc_cipher() - allocate single block cipher handle
1621  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1622  *           single block cipher
1623  * @type: specifies the type of the cipher
1624  * @mask: specifies the mask for the cipher
1625  *
1626  * Allocate a cipher handle for a single block cipher. The returned struct
1627  * crypto_cipher is the cipher handle that is required for any subsequent API
1628  * invocation for that single block cipher.
1629  *
1630  * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1631  *         of an error, PTR_ERR() returns the error code.
1632  */
1633 static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1634                                                         u32 type, u32 mask)
1635 {
1636         type &= ~CRYPTO_ALG_TYPE_MASK;
1637         type |= CRYPTO_ALG_TYPE_CIPHER;
1638         mask |= CRYPTO_ALG_TYPE_MASK;
1639 
1640         return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1641 }
1642 
1643 static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1644 {
1645         return &tfm->base;
1646 }
1647 
1648 /**
1649  * crypto_free_cipher() - zeroize and free the single block cipher handle
1650  * @tfm: cipher handle to be freed
1651  */
1652 static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1653 {
1654         crypto_free_tfm(crypto_cipher_tfm(tfm));
1655 }
1656 
1657 /**
1658  * crypto_has_cipher() - Search for the availability of a single block cipher
1659  * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1660  *           single block cipher
1661  * @type: specifies the type of the cipher
1662  * @mask: specifies the mask for the cipher
1663  *
1664  * Return: true when the single block cipher is known to the kernel crypto API;
1665  *         false otherwise
1666  */
1667 static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1668 {
1669         type &= ~CRYPTO_ALG_TYPE_MASK;
1670         type |= CRYPTO_ALG_TYPE_CIPHER;
1671         mask |= CRYPTO_ALG_TYPE_MASK;
1672 
1673         return crypto_has_alg(alg_name, type, mask);
1674 }
1675 
1676 static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1677 {
1678         return &crypto_cipher_tfm(tfm)->crt_cipher;
1679 }
1680 
1681 /**
1682  * crypto_cipher_blocksize() - obtain block size for cipher
1683  * @tfm: cipher handle
1684  *
1685  * The block size for the single block cipher referenced with the cipher handle
1686  * tfm is returned. The caller may use that information to allocate appropriate
1687  * memory for the data returned by the encryption or decryption operation
1688  *
1689  * Return: block size of cipher
1690  */
1691 static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1692 {
1693         return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
1694 }
1695 
1696 static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
1697 {
1698         return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
1699 }
1700 
1701 static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
1702 {
1703         return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
1704 }
1705 
1706 static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
1707                                            u32 flags)
1708 {
1709         crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
1710 }
1711 
1712 static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1713                                              u32 flags)
1714 {
1715         crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1716 }
1717 
1718 /**
1719  * crypto_cipher_setkey() - set key for cipher
1720  * @tfm: cipher handle
1721  * @key: buffer holding the key
1722  * @keylen: length of the key in bytes
1723  *
1724  * The caller provided key is set for the single block cipher referenced by the
1725  * cipher handle.
1726  *
1727  * Note that the key length determines the cipher variant. Many block ciphers
1728  * support different variants depending on the key size, such as AES-128 vs.
1729  * AES-192 vs. AES-256. When providing a 16-byte key for an AES cipher handle,
1730  * AES-128 is performed.
1731  *
1732  * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1733  */
1734 static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1735                                        const u8 *key, unsigned int keylen)
1736 {
1737         return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
1738                                                   key, keylen);
1739 }
1740 
1741 /**
1742  * crypto_cipher_encrypt_one() - encrypt one block of plaintext
1743  * @tfm: cipher handle
1744  * @dst: points to the buffer that will be filled with the ciphertext
1745  * @src: buffer holding the plaintext to be encrypted
1746  *
1747  * Invoke the encryption operation of one block. The caller must ensure that
1748  * the plaintext and ciphertext buffers are at least one block in size.
1749  */
1750 static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1751                                              u8 *dst, const u8 *src)
1752 {
1753         crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
1754                                                 dst, src);
1755 }
1756 
1757 /**
1758  * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
1759  * @tfm: cipher handle
1760  * @dst: points to the buffer that will be filled with the plaintext
1761  * @src: buffer holding the ciphertext to be decrypted
1762  *
1763  * Invoke the decryption operation of one block. The caller must ensure that
1764  * the plaintext and ciphertext buffers are at least one block in size.
1765  */
1766 static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1767                                              u8 *dst, const u8 *src)
1768 {
1769         crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
1770                                                 dst, src);
1771 }
1772 
1773 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
1774 {
1775         return (struct crypto_comp *)tfm;
1776 }
1777 
1778 static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
1779 {
1780         BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
1781                CRYPTO_ALG_TYPE_MASK);
1782         return __crypto_comp_cast(tfm);
1783 }
1784 
1785 static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
1786                                                     u32 type, u32 mask)
1787 {
1788         type &= ~CRYPTO_ALG_TYPE_MASK;
1789         type |= CRYPTO_ALG_TYPE_COMPRESS;
1790         mask |= CRYPTO_ALG_TYPE_MASK;
1791 
1792         return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
1793 }
1794 
1795 static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
1796 {
1797         return &tfm->base;
1798 }
1799 
1800 static inline void crypto_free_comp(struct crypto_comp *tfm)
1801 {
1802         crypto_free_tfm(crypto_comp_tfm(tfm));
1803 }
1804 
1805 static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
1806 {
1807         type &= ~CRYPTO_ALG_TYPE_MASK;
1808         type |= CRYPTO_ALG_TYPE_COMPRESS;
1809         mask |= CRYPTO_ALG_TYPE_MASK;
1810 
1811         return crypto_has_alg(alg_name, type, mask);
1812 }
1813 
1814 static inline const char *crypto_comp_name(struct crypto_comp *tfm)
1815 {
1816         return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
1817 }
1818 
1819 static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
1820 {
1821         return &crypto_comp_tfm(tfm)->crt_compress;
1822 }
1823 
1824 static inline int crypto_comp_compress(struct crypto_comp *tfm,
1825                                        const u8 *src, unsigned int slen,
1826                                        u8 *dst, unsigned int *dlen)
1827 {
1828         return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
1829                                                   src, slen, dst, dlen);
1830 }
1831 
1832 static inline int crypto_comp_decompress(struct crypto_comp *tfm,
1833                                          const u8 *src, unsigned int slen,
1834                                          u8 *dst, unsigned int *dlen)
1835 {
1836         return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
1837                                                     src, slen, dst, dlen);
1838 }
1839 
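/*
 * Illustrative sketch (not in the original header): a compress/decompress
 * round trip. "deflate" is an assumed algorithm name; the *dlen arguments
 * are in/out -- destination capacity on entry, bytes produced on return.
 */
static int example_comp_roundtrip(const u8 *src, unsigned int slen,
				  u8 *tmp, unsigned int tmplen,
				  u8 *out, unsigned int outlen)
{
	struct crypto_comp *tfm;
	unsigned int clen = tmplen, dlen = outlen;
	int ret;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, tmp, &clen);
	if (!ret)
		ret = crypto_comp_decompress(tfm, tmp, clen, out, &dlen);

	crypto_free_comp(tfm);
	return ret;
}
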
1840 #endif  /* _LINUX_CRYPTO_H */
1841 
1842 
