Linux/crypto/crypto_engine.c


/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        struct ablkcipher_request *req;
        unsigned long flags;
        bool was_busy = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                queue_kthread_work(&engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        queue_kthread_work(&engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        pr_err("failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        req = ablkcipher_request_cast(async_req);

        engine->cur_req = req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point a request has been dequeued successfully and needs processing */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        pr_err("failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        if (engine->prepare_request) {
                ret = engine->prepare_request(engine, engine->cur_req);
                if (ret) {
                        pr_err("failed to prepare request: %d\n", ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }

        ret = engine->crypt_one_request(engine, engine->cur_req);
        if (ret) {
                pr_err("failed to crypt one request from queue\n");
                goto req_err;
        }
        return;

req_err:
        crypto_finalize_request(engine, engine->cur_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, kick the pump worker after queueing the request
 */
int crypto_transfer_request(struct crypto_engine *engine,
                            struct ablkcipher_request *req, bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = ablkcipher_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                queue_kthread_work(&engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                      struct ablkcipher_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
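
/*
 * Illustrative sketch, not part of this file: a driver built on this
 * framework typically does not touch the hardware from its algorithm
 * callback; it only hands the request over to the engine queue. All
 * mydrv_* names below are hypothetical.
 *
 *      static int mydrv_aes_encrypt(struct ablkcipher_request *req)
 *      {
 *              struct mydrv_ctx *ctx = crypto_ablkcipher_ctx(
 *                                      crypto_ablkcipher_reqtfm(req));
 *
 *              return crypto_transfer_request_to_engine(ctx->engine, req);
 *      }
 *
 * The return value is whatever ablkcipher_enqueue_request() reported,
 * usually -EINPROGRESS (or -EBUSY for a backlogged request).
 */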

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
                             struct ablkcipher_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                if (engine->cur_req_prepared && engine->unprepare_request) {
                        ret = engine->unprepare_request(engine, req);
                        if (ret)
                                pr_err("failed to unprepare request\n");
                }

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->base.complete(&req->base, err);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
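
/*
 * Illustrative sketch, not part of this file: the driver reports hardware
 * completion back to the engine, for example from its interrupt handler or
 * a completion tasklet. All mydrv_* names below are hypothetical.
 *
 *      static irqreturn_t mydrv_irq(int irq, void *data)
 *      {
 *              struct mydrv_dev *dd = data;
 *              int err = mydrv_read_status(dd) ? -EIO : 0;
 *
 *              crypto_finalize_request(dd->engine, dd->req, err);
 *              return IRQ_HANDLED;
 *      }
 *
 * crypto_finalize_request() then completes req->base and kicks the pump
 * worker so the next queued request gets processed.
 */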

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        queue_kthread_work(&engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, we need to
         * wait for a while until the queued requests have been pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                pr_warn("could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        init_kthread_worker(&engine->kworker);
        engine->kworker_task = kthread_run(kthread_worker_fn,
                                           &engine->kworker, "%s",
                                           engine->name);
        if (IS_ERR(engine->kworker_task)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        init_kthread_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
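
/*
 * Illustrative sketch, not part of this file: a driver usually allocates and
 * starts the engine at probe time and wires up the callbacks that
 * crypto_pump_requests() above will invoke. All mydrv_* names below are
 * hypothetical.
 *
 *      static int mydrv_probe(struct platform_device *pdev)
 *      {
 *              struct mydrv_dev *dd;
 *
 *              dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
 *              if (!dd)
 *                      return -ENOMEM;
 *
 *              dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *              if (!dd->engine)
 *                      return -ENOMEM;
 *
 *              dd->engine->prepare_request = mydrv_prepare_request;
 *              dd->engine->unprepare_request = mydrv_unprepare_request;
 *              dd->engine->crypt_one_request = mydrv_crypt_one_request;
 *
 *              platform_set_drvdata(pdev, dd);
 *              return crypto_engine_start(dd->engine);
 *      }
 */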

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        flush_kthread_worker(&engine->kworker);
        kthread_stop(engine->kworker_task);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
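
/*
 * Illustrative sketch, not part of this file: the matching teardown for the
 * probe example above stops the pump thread before the device goes away.
 * All mydrv_* names below are hypothetical.
 *
 *      static int mydrv_remove(struct platform_device *pdev)
 *      {
 *              struct mydrv_dev *dd = platform_get_drvdata(pdev);
 *
 *              crypto_engine_exit(dd->engine);
 *              return 0;
 *      }
 */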

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
