/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
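/*
 * scomp algorithms are exposed to callers through the acomp API.  To
 * service an acomp request, data is copied between the request's
 * scatterlists and per-CPU scratch buffers: one SCOMP_SCRATCH_SIZE
 * source and one destination buffer per possible CPU, refcounted by
 * scomp_scratch_users and managed under scomp_lock, so the first
 * registered scomp algorithm allocates them and the last one
 * unregistered releases them.
 */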
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}

static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
			if (!req->dst) {
				/* report the allocation failure, not success */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}
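/*
 * Illustrative sketch (not part of this file, compiled out via #if 0):
 * how a caller reaches the scomp path above through the acomp facade.
 * "deflate" is only an example algorithm name; the snippet assumes
 * <crypto/acompress.h> and linear (e.g. kmalloc'd) in/out buffers,
 * since sg_init_one() maps them into single-entry scatterlists.
 */
#if 0
static int example_compress(const u8 *in, unsigned int inlen,
			    u8 *out, unsigned int outcap,
			    unsigned int *outlen)
{
	struct crypto_acomp *acomp;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int ret;

	acomp = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(acomp))
		return PTR_ERR(acomp);

	req = acomp_request_alloc(acomp);
	if (!req) {
		crypto_free_acomp(acomp);
		return -ENOMEM;
	}

	sg_init_one(&src, in, inlen);
	sg_init_one(&dst, out, outcap);
	acomp_request_set_params(req, &src, &dst, inlen, outcap);

	/*
	 * An scomp-backed tfm completes synchronously, so no completion
	 * callback is needed here; req->dlen holds the output length.
	 */
	ret = crypto_acomp_compress(req);
	if (!ret)
		*outlen = req->dlen;

	acomp_request_free(req);
	crypto_free_acomp(acomp);
	return ret;
}
#endif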
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
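/*
 * Illustrative sketch (not part of this file, compiled out via #if 0):
 * the shape of an scomp provider registered through the API above.
 * The foo_* helpers are hypothetical placeholders; in-tree users such
 * as crypto/lzo.c follow this pattern.
 */
#if 0
static void *foo_alloc_ctx(struct crypto_scomp *tfm)
{
	/* Stateless example: no per-context working memory needed. */
	return NULL;
}

static void foo_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int foo_compress(struct crypto_scomp *tfm, const u8 *src,
			unsigned int slen, u8 *dst, unsigned int *dlen,
			void *ctx)
{
	/* Compress slen bytes from src into dst; set *dlen on success. */
	return -ENOSYS;
}

static int foo_decompress(struct crypto_scomp *tfm, const u8 *src,
			  unsigned int slen, u8 *dst, unsigned int *dlen,
			  void *ctx)
{
	return -ENOSYS;
}

static struct scomp_alg foo_alg = {
	.alloc_ctx	= foo_alloc_ctx,
	.free_ctx	= foo_free_ctx,
	.compress	= foo_compress,
	.decompress	= foo_decompress,
	.base		= {
		.cra_name	 = "foo",
		.cra_driver_name = "foo-scomp",
		.cra_module	 = THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	/* Pairs with crypto_unregister_scomp(&foo_alg) on module exit. */
	return crypto_register_scomp(&foo_alg);
}
#endif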