From 55be37e9e308990b2eeeef7f974dfbfbb1120266 Mon Sep 17 00:00:00 2001
From: Yashpal Dutta <yashpal.dutta@freescale.com>
Date: Fri, 7 Mar 2014 06:16:09 +0545
Subject: [Patch][fsl 09/16] PKC support added in cryptodev module

Upstream-status: Pending

Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
---
 cryptlib.c         |  66 ++++++++-
 crypto/cryptodev.h |  15 ++-
 cryptodev_int.h    |  20 ++-
 ioctl.c            | 196 +++++++++++++++++++++++++--
 main.c             | 378 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 6 files changed, 685 insertions(+), 18 deletions(-)
diff --git a/cryptlib.c b/cryptlib.c
index 3576f39..fa0b63f 100644
--- a/cryptlib.c
+++ b/cryptlib.c
  * Portions Copyright (c) 2010 Michael Weiser
  * Portions Copyright (c) 2010 Phil Sutter
  *
+ * Copyright 2012 Freescale Semiconductor, Inc.
  *
  * This file is part of linux cryptodev.
  *
  * This program is free software; you can redistribute it and/or
 #include "cryptodev_int.h"
-struct cryptodev_result {
-        struct completion completion;
-        int err;
-};
-
 static void cryptodev_complete(struct crypto_async_request *req, int err)
 {
         struct cryptodev_result *res = req->data;
@@ -244,7 +241,6 @@ static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
                 wait_for_completion(&cr->completion);
                 /* At this point we know for sure the request has finished,
                  * because wait_for_completion above was not interruptible.
@@ -424,3 +420,61 @@ int cryptodev_hash_final(struct hash_data *hdata, void *output)
         return waitfor(hdata->async.result, ret);
 }
+
+int cryptodev_pkc_offload(struct cryptodev_pkc *pkc)
+{
+        int ret;
+        struct pkc_request *pkc_req = &pkc->req, *pkc_requested;
+
+        switch (pkc_req->type) {
+        case RSA_PUB:
+        case RSA_PRIV_FORM1:
+        case RSA_PRIV_FORM2:
+        case RSA_PRIV_FORM3:
+                pkc->s = crypto_alloc_pkc("pkc(rsa)",
+                                          CRYPTO_ALG_TYPE_PKC_RSA, 0);
+                break;
+        case DSA_SIGN:
+        case DSA_VERIFY:
+        case ECDSA_SIGN:
+        case ECDSA_VERIFY:
+                pkc->s = crypto_alloc_pkc("pkc(dsa)",
+                                          CRYPTO_ALG_TYPE_PKC_DSA, 0);
+                break;
+        case DH_COMPUTE_KEY:
+        case ECDH_COMPUTE_KEY:
+                pkc->s = crypto_alloc_pkc("pkc(dh)",
+                                          CRYPTO_ALG_TYPE_PKC_DH, 0);
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        if (IS_ERR_OR_NULL(pkc->s))
+                return -EINVAL;
+
+        init_completion(&pkc->result.completion);
+        pkc_requested = pkc_request_alloc(pkc->s, GFP_KERNEL);
+        if (unlikely(IS_ERR_OR_NULL(pkc_requested))) {
+                ret = -ENOMEM;
+                goto error;
+        }
+        pkc_requested->type = pkc_req->type;
+        pkc_requested->curve_type = pkc_req->curve_type;
+        memcpy(&pkc_requested->req_u, &pkc_req->req_u, sizeof(pkc_req->req_u));
+        pkc_request_set_callback(pkc_requested, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                 cryptodev_complete_asym, pkc);
+        ret = crypto_pkc_op(pkc_requested);
+        if (ret != -EINPROGRESS && ret != 0)
+                goto error;
+
+        if (pkc->type == SYNCHRONOUS)
+                ret = waitfor(&pkc->result, ret);
+
+        return ret;
+error:
+        kfree(pkc_requested);
+        crypto_free_pkc(pkc->s);
+        return ret;
+}
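
The synchronous path above parks the caller on a completion that the request callback fires. For reference, a minimal standalone sketch of that wait/complete pattern (illustration only, not part of the patch; it mirrors cryptodev_complete()/waitfor() from cryptlib.c, and the demo_* names are hypothetical):

/* Illustration only: the wait/complete pattern cryptodev_pkc_offload()
 * relies on.  A backlogged request signals -EINPROGRESS first and
 * completes later, so the callback ignores that transition and only
 * wakes the waiter with the final status. */
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

struct demo_result {
        struct completion completion;
        int err;
};

static void demo_complete(struct crypto_async_request *req, int err)
{
        struct demo_result *res = req->data;

        if (err == -EINPROGRESS)        /* moved off the backlog; keep waiting */
                return;
        res->err = err;
        complete(&res->completion);
}

static int demo_wait(struct demo_result *res, int ret)
{
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res->completion);
                ret = res->err;         /* final status set by demo_complete() */
        }
        return ret;
}
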
diff --git a/cryptlib.h b/cryptlib.h
index 4cb66ad..e77edc5 100644
--- a/cryptlib.h
+++ b/cryptlib.h
+ * Copyright 2012 Freescale Semiconductor, Inc.
@@ -89,5 +92,30 @@ void cryptodev_hash_deinit(struct hash_data *hdata);
 int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
                         int hmac_mode, void *mackey, unsigned int mackeylen);
 
+/* Operation Type */
+enum offload_type {
+        SYNCHRONOUS,
+        ASYNCHRONOUS
+};
+
+struct cryptodev_result {
+        struct completion completion;
+        int err;
+};
+
+struct cryptodev_pkc {
+        struct list_head list;          /* keeps completed jobs until fetched */
+        struct kernel_crypt_kop kop;
+        struct crypto_pkc *s;           /* transform pointer from CryptoAPI */
+        struct cryptodev_result result; /* result updated by the
+                                           completion handler */
+        struct pkc_request req;         /* PKC request structure for CryptoAPI */
+        enum offload_type type;         /* synchronous vs asynchronous request */
+        void *cookie;                   /* additional opaque cookie for future use */
+        struct crypt_priv *priv;
+};
+
+int cryptodev_pkc_offload(struct cryptodev_pkc *);
diff --git a/crypto/cryptodev.h b/crypto/cryptodev.h
index 3ea3d35..575ce63 100644
--- a/crypto/cryptodev.h
+++ b/crypto/cryptodev.h
-/* This is a source compatible implementation with the original API of
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This is a source compatible implementation with the original API of
  * cryptodev by Angelos D. Keromytis, found at openbsd cryptodev.h.
- * Placed under public domain */
+ * Placed under public domain
+ */
 #ifndef L_CRYPTODEV_H
 #define L_CRYPTODEV_H
@@ -246,6 +250,9 @@ struct crypt_kop {
         struct crparam crk_param[CRK_MAXPARAM];
+        enum curve_t curve_type; /* 0 = Discrete Log,
+                                    1 = EC_PRIME, 2 = EC_BINARY */
 };
 
 enum cryptodev_crk_op_t {
@@ -290,5 +297,7 @@ enum cryptodev_crk_op_t {
 #define CIOCASYNCCRYPT    _IOW('c', 110, struct crypt_op)
 #define CIOCASYNCFETCH    _IOR('c', 111, struct crypt_op)
 
+/* additional ioctls for asynchronous operation on asymmetric ciphers */
+#define CIOCASYMASYNCRYPT _IOW('c', 112, struct crypt_kop)
+#define CIOCASYMASYNFETCH _IOR('c', 113, struct crypt_kop)
 #endif /* L_CRYPTODEV_H */
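
A plausible userspace call sequence for the two new ioctls, shown as a sketch: it assumes a /dev/crypto fd and a crypt_kop already filled in exactly as for the synchronous CIOCKEY path (the helper name and error handling are illustrative, not part of the patch):

/* Hypothetical helper: queue one asymmetric request and wait for it.
 * kop must already carry crk_op, crk_iparams/crk_oparams and the
 * crk_param[] buffers, as it would for the synchronous CIOCKEY ioctl. */
#include <poll.h>
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>

static int asym_crypt_async(int cfd, struct crypt_kop *kop)
{
        struct pollfd pfd = { .fd = cfd, .events = POLLIN };

        if (ioctl(cfd, CIOCASYMASYNCRYPT, kop))    /* queue the request */
                return -1;
        if (poll(&pfd, 1, -1) < 0)                 /* POLLIN: a job completed */
                return -1;
        return ioctl(cfd, CIOCASYMASYNFETCH, kop); /* copy the results back */
}
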
diff --git a/cryptodev_int.h b/cryptodev_int.h
index 8891837..b08c253 100644
--- a/cryptodev_int.h
+++ b/cryptodev_int.h
+ * Copyright 2012 Freescale Semiconductor, Inc.
 #ifndef CRYPTODEV_INT_H
 # define CRYPTODEV_INT_H
@@ -113,6 +115,14 @@ struct compat_crypt_auth_op {
 #endif /* CONFIG_COMPAT */
 
+/* kernel-internal extension to struct crypt_kop */
+struct kernel_crypt_kop {
+        struct crypt_kop kop;
+
+        struct task_struct *task;
+        struct mm_struct *mm;
+};
+
 /* kernel-internal extension to struct crypt_op */
 struct kernel_crypt_op {
@@ -158,6 +168,14 @@ int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop);
 #include <cryptlib.h>
 
+/* cryptodev key-operation handlers */
+int crypto_bn_modexp(struct cryptodev_pkc *);
+int crypto_modexp_crt(struct cryptodev_pkc *);
+int crypto_kop_dsasign(struct cryptodev_pkc *);
+int crypto_kop_dsaverify(struct cryptodev_pkc *);
+int crypto_run_asym(struct cryptodev_pkc *);
+void cryptodev_complete_asym(struct crypto_async_request *, int);
+
 /* other internal structs */
 struct csession {
         struct list_head entry;
diff --git a/ioctl.c b/ioctl.c
index 63467e0..44070e1 100644
--- a/ioctl.c
+++ b/ioctl.c
  * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
  * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
  * Copyright (c) 2010 Phil Sutter
+ * Copyright 2012 Freescale Semiconductor, Inc.
  *
  * This file is part of linux cryptodev.
@@ -87,8 +88,37 @@ struct crypt_priv {
         struct work_struct cryptask;
         wait_queue_head_t user_waiter;
+        /* list of completed cryptodev_pkc asym requests awaiting fetch */
+        struct list_head asym_completed_list;
+        /* protects addition/removal on the completed asym request list */
+        spinlock_t completion_lock;
 };
 
+/* asymmetric request completion handler */
+void cryptodev_complete_asym(struct crypto_async_request *req, int err)
+{
+        struct cryptodev_pkc *pkc = req->data;
+        struct cryptodev_result *res = &pkc->result;
+
+        crypto_free_pkc(pkc->s);
+        res->err = err;
+        if (pkc->type == SYNCHRONOUS) {
+                if (err == -EINPROGRESS)
+                        return;
+                complete(&res->completion);
+        } else {
+                struct crypt_priv *pcr = pkc->priv;
+                unsigned long flags;
+
+                spin_lock_irqsave(&pcr->completion_lock, flags);
+                list_add_tail(&pkc->list, &pcr->asym_completed_list);
+                spin_unlock_irqrestore(&pcr->completion_lock, flags);
+                /* wake for POLLIN */
+                wake_up_interruptible(&pcr->user_waiter);
+        }
+}
+
 #define FILL_SG(sg, ptr, len)                                   \
         (sg)->page = virt_to_page(ptr);                         \
@@ -467,7 +497,8 @@ cryptodev_open(struct inode *inode, struct file *filp)
         INIT_LIST_HEAD(&pcr->free.list);
         INIT_LIST_HEAD(&pcr->todo.list);
         INIT_LIST_HEAD(&pcr->done.list);
+        INIT_LIST_HEAD(&pcr->asym_completed_list);
+        spin_lock_init(&pcr->completion_lock);
         INIT_WORK(&pcr->cryptask, cryptask_routine);
 
         init_waitqueue_head(&pcr->user_waiter);
@@ -634,6 +665,79 @@ static int crypto_async_fetch(struct crypt_priv *pcr,
 
+/* get the first completed asym cipher job from the "done" queue
+ *
+ * returns:
+ * -EBUSY if no completed jobs are ready (yet)
+ * the return value otherwise */
+static int crypto_async_fetch_asym(struct cryptodev_pkc *pkc)
+{
+        int ret = 0;
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *ckop = &kop->kop;
+        struct pkc_request *pkc_req = &pkc->req;
+
+        switch (ckop->crk_op) {
+        case CRK_MOD_EXP:
+        {
+                struct rsa_pub_req_s *rsa_req = &pkc_req->req_u.rsa_pub_req;
+
+                copy_to_user(ckop->crk_param[3].crp_p, rsa_req->g,
+                             rsa_req->g_len);
+        }
+        break;
+        case CRK_MOD_EXP_CRT:
+        {
+                struct rsa_priv_frm3_req_s *rsa_req =
+                        &pkc_req->req_u.rsa_priv_f3;
+
+                copy_to_user(ckop->crk_param[6].crp_p,
+                             rsa_req->f, rsa_req->f_len);
+        }
+        break;
+        case CRK_DSA_SIGN:
+        {
+                struct dsa_sign_req_s *dsa_req = &pkc_req->req_u.dsa_sign;
+
+                if (pkc_req->type == ECDSA_SIGN) {
+                        copy_to_user(ckop->crk_param[6].crp_p,
+                                     dsa_req->c, dsa_req->d_len);
+                        copy_to_user(ckop->crk_param[7].crp_p,
+                                     dsa_req->d, dsa_req->d_len);
+                } else {
+                        copy_to_user(ckop->crk_param[5].crp_p,
+                                     dsa_req->c, dsa_req->d_len);
+                        copy_to_user(ckop->crk_param[6].crp_p,
+                                     dsa_req->d, dsa_req->d_len);
+                }
+        }
+        break;
+        case CRK_DSA_VERIFY:
+                break;
+        case CRK_DH_COMPUTE_KEY:
+        {
+                struct dh_key_req_s *dh_req = &pkc_req->req_u.dh_req;
+
+                if (pkc_req->type == ECDH_COMPUTE_KEY)
+                        copy_to_user(ckop->crk_param[4].crp_p,
+                                     dh_req->z, dh_req->z_len);
+                else
+                        copy_to_user(ckop->crk_param[3].crp_p,
+                                     dh_req->z, dh_req->z_len);
+        }
+        break;
+        }
+        kfree(pkc->cookie);
+        return ret;
+}
+
+/* this function has to be called from process context */
+static int fill_kop_from_cop(struct kernel_crypt_kop *kop)
+{
+        kop->task = current;
+        kop->mm = current->mm;
+        return 0;
+}
+
 /* this function has to be called from process context */
 static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
@@ -657,11 +761,8 @@ static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
         rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
-        if (unlikely(rc)) {
-                derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
-                     kcop->ivlen, rc, cop->iv);
@@ -687,6 +788,25 @@ static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
 
+static int kop_from_user(struct kernel_crypt_kop *kop,
+                         void __user *arg)
+{
+        if (unlikely(copy_from_user(&kop->kop, arg, sizeof(kop->kop))))
+                return -EFAULT;
+
+        return fill_kop_from_cop(kop);
+}
+
+static int kop_to_user(struct kernel_crypt_kop *kop,
+                       void __user *arg)
+{
+        if (unlikely(copy_to_user(arg, &kop->kop, sizeof(kop->kop)))) {
+                dprintk(1, KERN_ERR, "Cannot copy to userspace\n");
+                return -EFAULT;
+        }
+        return 0;
+}
+
 static int kcop_from_user(struct kernel_crypt_op *kcop,
                           struct fcrypt *fcr, void __user *arg)
@@ -816,7 +936,8 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
         case CIOCASYMFEAT:
-                return put_user(0, p);
+                return put_user(CRF_MOD_EXP_CRT | CRF_MOD_EXP |
+                                CRF_DSA_SIGN | CRF_DSA_VERIFY | CRF_DH_COMPUTE_KEY, p);
         case CRIOGET:
                 ret = put_user(fd, p);
@@ -852,6 +973,24 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
                 return copy_to_user(arg, &siop, sizeof(siop));
+        case CIOCKEY:
+        {
+                struct cryptodev_pkc *pkc =
+                        kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+
+                if (!pkc)
+                        return -ENOMEM;
+                ret = kop_from_user(&pkc->kop, arg);
+                if (unlikely(ret)) {
+                        kfree(pkc);
+                        return ret;
+                }
+                pkc->type = SYNCHRONOUS;
+                ret = crypto_run_asym(pkc);
+                kfree(pkc);
+                return ret;
+        }
         case CIOCCRYPT:
                 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
                         dwarning(1, "Error copying from user");
@@ -890,6 +1029,45 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
                 return kcop_to_user(&kcop, fcr, arg);
+        case CIOCASYMASYNCRYPT:
+        {
+                struct cryptodev_pkc *pkc =
+                        kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+
+                if (!pkc)
+                        return -ENOMEM;
+                ret = kop_from_user(&pkc->kop, arg);
+                if (unlikely(ret)) {
+                        kfree(pkc);
+                        return ret;
+                }
+                /* store the FD's priv data with the asymmetric request */
+                pkc->priv = pcr;
+                pkc->type = ASYNCHRONOUS;
+                ret = crypto_run_asym(pkc);
+                if (ret == -EINPROGRESS)
+                        ret = 0;
+                return ret;
+        }
+        case CIOCASYMASYNFETCH:
+        {
+                struct cryptodev_pkc *pkc;
+                unsigned long flags;
+
+                spin_lock_irqsave(&pcr->completion_lock, flags);
+                if (list_empty(&pcr->asym_completed_list)) {
+                        spin_unlock_irqrestore(&pcr->completion_lock, flags);
+                        return -EBUSY;
+                }
+                pkc = list_first_entry(&pcr->asym_completed_list,
+                                       struct cryptodev_pkc, list);
+                list_del(&pkc->list);
+                spin_unlock_irqrestore(&pcr->completion_lock, flags);
+                ret = crypto_async_fetch_asym(pkc);
+
+                /* reflect the updated request back to user-space */
+                if (!ret)
+                        kop_to_user(&pkc->kop, arg);
+                kfree(pkc);
+                return ret;
+        }
@@ -1078,9 +1256,11 @@ static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
         poll_wait(file, &pcr->user_waiter, wait);
 
-        if (!list_empty_careful(&pcr->done.list))
+        if (!list_empty_careful(&pcr->done.list) ||
+            !list_empty_careful(&pcr->asym_completed_list))
                 ret |= POLLIN | POLLRDNORM;
-        if (!list_empty_careful(&pcr->free.list) || pcr->itemcount < MAX_COP_RINGSIZE)
+        if (!list_empty_careful(&pcr->free.list) ||
+            pcr->itemcount < MAX_COP_RINGSIZE)
                 ret |= POLLOUT | POLLWRNORM;
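
The completion-to-fetch hand-off implemented above is a standard interrupt-to-process producer/consumer: the callback appends to a spinlock-protected list and wakes a wait queue; poll() reports POLLIN while the list is non-empty, and the fetch ioctl drains it. A reduced sketch of the same pattern with hypothetical names (the patch itself uses cryptodev_pkc and the asym_completed_list/completion_lock members of struct crypt_priv):

/* Sketch only, with hypothetical demo_* names. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_job {
        struct list_head list;
};

static LIST_HEAD(done_list);
static DEFINE_SPINLOCK(done_lock);
static DECLARE_WAIT_QUEUE_HEAD(done_waiter);

/* producer: may run in interrupt context, hence the irqsave variant */
static void demo_job_done(struct demo_job *job)
{
        unsigned long flags;

        spin_lock_irqsave(&done_lock, flags);
        list_add_tail(&job->list, &done_list);
        spin_unlock_irqrestore(&done_lock, flags);
        wake_up_interruptible(&done_waiter);    /* lets poll() report POLLIN */
}

/* consumer: process context (the fetch ioctl); NULL when nothing is ready */
static struct demo_job *demo_job_fetch(void)
{
        struct demo_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&done_lock, flags);
        if (!list_empty(&done_list)) {
                job = list_first_entry(&done_list, struct demo_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&done_lock, flags);
        return job;
}
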
diff --git a/main.c b/main.c
index 57e5c38..0b7951e 100644
--- a/main.c
+++ b/main.c
@@ -181,6 +181,384 @@ __crypto_run_zc(struct csession *ses_ptr, struct kernel_crypt_op *kcop)
 
+int crypto_kop_dsasign(struct cryptodev_pkc *pkc)
+{
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *cop = &kop->kop;
+        struct pkc_request *pkc_req = &pkc->req;
+        struct dsa_sign_req_s *dsa_req = &pkc_req->req_u.dsa_sign;
+        int rc, buf_size;
+        uint8_t *buf;
+
+        if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+            !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+            !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits ||
+            !cop->crk_param[6].crp_nbits || (cop->crk_iparams == 6 &&
+            !cop->crk_param[7].crp_nbits))
+                return -EINVAL;
+
+        dsa_req->m_len = (cop->crk_param[0].crp_nbits + 7)/8;
+        dsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+        dsa_req->r_len = (cop->crk_param[2].crp_nbits + 7)/8;
+        dsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+        dsa_req->priv_key_len = (cop->crk_param[4].crp_nbits + 7)/8;
+        dsa_req->d_len = (cop->crk_param[6].crp_nbits + 7)/8;
+        buf_size = dsa_req->m_len + dsa_req->q_len + dsa_req->r_len +
+                   dsa_req->g_len + dsa_req->priv_key_len + dsa_req->d_len +
+                   dsa_req->d_len; /* c is the same size as d */
+        if (cop->crk_iparams == 6) {
+                dsa_req->ab_len = (cop->crk_param[5].crp_nbits + 7)/8;
+                buf_size += dsa_req->ab_len;
+                pkc_req->type = ECDSA_SIGN;
+                pkc_req->curve_type = cop->curve_type;
+        } else {
+                pkc_req->type = DSA_SIGN;
+        }
+
+        buf = kzalloc(buf_size, GFP_DMA);
+        if (!buf)
+                return -ENOMEM;
+        dsa_req->q = buf;
+        dsa_req->r = dsa_req->q + dsa_req->q_len;
+        dsa_req->g = dsa_req->r + dsa_req->r_len;
+        dsa_req->priv_key = dsa_req->g + dsa_req->g_len;
+        dsa_req->m = dsa_req->priv_key + dsa_req->priv_key_len;
+        dsa_req->c = dsa_req->m + dsa_req->m_len;
+        dsa_req->d = dsa_req->c + dsa_req->d_len;
+        copy_from_user(dsa_req->m, cop->crk_param[0].crp_p, dsa_req->m_len);
+        copy_from_user(dsa_req->q, cop->crk_param[1].crp_p, dsa_req->q_len);
+        copy_from_user(dsa_req->r, cop->crk_param[2].crp_p, dsa_req->r_len);
+        copy_from_user(dsa_req->g, cop->crk_param[3].crp_p, dsa_req->g_len);
+        copy_from_user(dsa_req->priv_key, cop->crk_param[4].crp_p,
+                       dsa_req->priv_key_len);
+        if (cop->crk_iparams == 6) {
+                dsa_req->ab = dsa_req->d + dsa_req->d_len;
+                copy_from_user(dsa_req->ab, cop->crk_param[5].crp_p,
+                               dsa_req->ab_len);
+        }
+        rc = cryptodev_pkc_offload(pkc);
+        if (pkc->type == SYNCHRONOUS) {
+                if (rc)
+                        goto err;
+                if (cop->crk_iparams == 6) {
+                        copy_to_user(cop->crk_param[6].crp_p, dsa_req->c,
+                                     dsa_req->d_len);
+                        copy_to_user(cop->crk_param[7].crp_p, dsa_req->d,
+                                     dsa_req->d_len);
+                } else {
+                        copy_to_user(cop->crk_param[5].crp_p, dsa_req->c,
+                                     dsa_req->d_len);
+                        copy_to_user(cop->crk_param[6].crp_p, dsa_req->d,
+                                     dsa_req->d_len);
+                }
+                kfree(buf);
+        } else {
+                if (rc != -EINPROGRESS && rc != 0)
+                        goto err;
+                /* buf is freed later by the fetch handler via pkc->cookie */
+                pkc->cookie = buf;
+        }
+        return rc;
+err:
+        kfree(buf);
+        return rc;
+}
+
+int crypto_kop_dsaverify(struct cryptodev_pkc *pkc)
+{
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *cop = &kop->kop;
+        struct pkc_request *pkc_req;
+        struct dsa_verify_req_s *dsa_req;
+        int rc, buf_size;
+        uint8_t *buf;
+
+        if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+            !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+            !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits ||
+            !cop->crk_param[6].crp_nbits || (cop->crk_iparams == 8 &&
+            !cop->crk_param[7].crp_nbits))
+                return -EINVAL;
+
+        pkc_req = &pkc->req;
+        dsa_req = &pkc_req->req_u.dsa_verify;
+        dsa_req->m_len = (cop->crk_param[0].crp_nbits + 7)/8;
+        dsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+        dsa_req->r_len = (cop->crk_param[2].crp_nbits + 7)/8;
+        dsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+        dsa_req->pub_key_len = (cop->crk_param[4].crp_nbits + 7)/8;
+        dsa_req->d_len = (cop->crk_param[6].crp_nbits + 7)/8;
+        buf_size = dsa_req->m_len + dsa_req->q_len + dsa_req->r_len +
+                   dsa_req->g_len + dsa_req->pub_key_len + dsa_req->d_len +
+                   dsa_req->d_len; /* c is the same size as d */
+        if (cop->crk_iparams == 8) {
+                dsa_req->ab_len = (cop->crk_param[5].crp_nbits + 7)/8;
+                buf_size += dsa_req->ab_len;
+                pkc_req->type = ECDSA_VERIFY;
+                pkc_req->curve_type = cop->curve_type;
+        } else {
+                pkc_req->type = DSA_VERIFY;
+        }
+
+        buf = kzalloc(buf_size, GFP_DMA);
+        if (!buf)
+                return -ENOMEM;
+        dsa_req->q = buf;
+        dsa_req->r = dsa_req->q + dsa_req->q_len;
+        dsa_req->g = dsa_req->r + dsa_req->r_len;
+        dsa_req->pub_key = dsa_req->g + dsa_req->g_len;
+        dsa_req->m = dsa_req->pub_key + dsa_req->pub_key_len;
+        dsa_req->c = dsa_req->m + dsa_req->m_len;
+        dsa_req->d = dsa_req->c + dsa_req->d_len;
+        copy_from_user(dsa_req->m, cop->crk_param[0].crp_p, dsa_req->m_len);
+        copy_from_user(dsa_req->q, cop->crk_param[1].crp_p, dsa_req->q_len);
+        copy_from_user(dsa_req->r, cop->crk_param[2].crp_p, dsa_req->r_len);
+        copy_from_user(dsa_req->g, cop->crk_param[3].crp_p, dsa_req->g_len);
+        copy_from_user(dsa_req->pub_key, cop->crk_param[4].crp_p,
+                       dsa_req->pub_key_len);
+        if (cop->crk_iparams == 8) {
+                dsa_req->ab = dsa_req->d + dsa_req->d_len;
+                copy_from_user(dsa_req->ab, cop->crk_param[5].crp_p,
+                               dsa_req->ab_len);
+                copy_from_user(dsa_req->c, cop->crk_param[6].crp_p,
+                               dsa_req->d_len);
+                copy_from_user(dsa_req->d, cop->crk_param[7].crp_p,
+                               dsa_req->d_len);
+        } else {
+                copy_from_user(dsa_req->c, cop->crk_param[5].crp_p,
+                               dsa_req->d_len);
+                copy_from_user(dsa_req->d, cop->crk_param[6].crp_p,
+                               dsa_req->d_len);
+        }
+        rc = cryptodev_pkc_offload(pkc);
+        if (pkc->type == SYNCHRONOUS) {
+                if (rc)
+                        goto err;
+                kfree(buf);
+        } else {
+                if (rc != -EINPROGRESS && rc != 0)
+                        goto err;
+                /* buf is freed later by the fetch handler via pkc->cookie */
+                pkc->cookie = buf;
+        }
+        return rc;
+err:
+        kfree(buf);
+        return rc;
+}
+
+int crypto_kop_dh_key(struct cryptodev_pkc *pkc)
+{
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *cop = &kop->kop;
+        struct pkc_request *pkc_req;
+        struct dh_key_req_s *dh_req;
+        int rc, buf_size;
+        uint8_t *buf;
+
+        pkc_req = &pkc->req;
+        dh_req = &pkc_req->req_u.dh_req;
+        dh_req->s_len = (cop->crk_param[0].crp_nbits + 7)/8;
+        dh_req->pub_key_len = (cop->crk_param[1].crp_nbits + 7)/8;
+        dh_req->q_len = (cop->crk_param[2].crp_nbits + 7)/8;
+        buf_size = dh_req->q_len + dh_req->pub_key_len + dh_req->s_len;
+        if (cop->crk_iparams == 4) {
+                pkc_req->type = ECDH_COMPUTE_KEY;
+                dh_req->ab_len = (cop->crk_param[3].crp_nbits + 7)/8;
+                dh_req->z_len = (cop->crk_param[4].crp_nbits + 7)/8;
+                buf_size += dh_req->ab_len;
+        } else {
+                dh_req->z_len = (cop->crk_param[3].crp_nbits + 7)/8;
+                pkc_req->type = DH_COMPUTE_KEY;
+        }
+        buf_size += dh_req->z_len;
+        buf = kzalloc(buf_size, GFP_DMA);
+        if (!buf)
+                return -ENOMEM;
+        dh_req->q = buf;
+        dh_req->s = dh_req->q + dh_req->q_len;
+        dh_req->pub_key = dh_req->s + dh_req->s_len;
+        dh_req->z = dh_req->pub_key + dh_req->pub_key_len;
+        if (cop->crk_iparams == 4) {
+                dh_req->ab = dh_req->z + dh_req->z_len;
+                pkc_req->curve_type = cop->curve_type;
+                copy_from_user(dh_req->ab, cop->crk_param[3].crp_p,
+                               dh_req->ab_len);
+        }
+        copy_from_user(dh_req->s, cop->crk_param[0].crp_p, dh_req->s_len);
+        copy_from_user(dh_req->pub_key, cop->crk_param[1].crp_p,
+                       dh_req->pub_key_len);
+        copy_from_user(dh_req->q, cop->crk_param[2].crp_p, dh_req->q_len);
+        rc = cryptodev_pkc_offload(pkc);
+        if (pkc->type == SYNCHRONOUS) {
+                if (rc)
+                        goto err;
+                if (cop->crk_iparams == 4)
+                        copy_to_user(cop->crk_param[4].crp_p, dh_req->z,
+                                     dh_req->z_len);
+                else
+                        copy_to_user(cop->crk_param[3].crp_p, dh_req->z,
+                                     dh_req->z_len);
+                kfree(buf);
+        } else {
+                if (rc != -EINPROGRESS && rc != 0)
+                        goto err;
+                /* buf is freed later by the fetch handler via pkc->cookie */
+                pkc->cookie = buf;
+        }
+        return rc;
+err:
+        kfree(buf);
+        return rc;
+}
+
+int crypto_modexp_crt(struct cryptodev_pkc *pkc)
+{
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *cop = &kop->kop;
+        struct pkc_request *pkc_req;
+        struct rsa_priv_frm3_req_s *rsa_req;
+        int rc;
+        uint8_t *buf;
+
+        if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+            !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits ||
+            !cop->crk_param[4].crp_nbits || !cop->crk_param[5].crp_nbits)
+                return -EINVAL;
+
+        pkc_req = &pkc->req;
+        pkc_req->type = RSA_PRIV_FORM3;
+        rsa_req = &pkc_req->req_u.rsa_priv_f3;
+        rsa_req->p_len = (cop->crk_param[0].crp_nbits + 7)/8;
+        rsa_req->q_len = (cop->crk_param[1].crp_nbits + 7)/8;
+        rsa_req->g_len = (cop->crk_param[2].crp_nbits + 7)/8;
+        rsa_req->dp_len = (cop->crk_param[3].crp_nbits + 7)/8;
+        rsa_req->dq_len = (cop->crk_param[4].crp_nbits + 7)/8;
+        rsa_req->c_len = (cop->crk_param[5].crp_nbits + 7)/8;
+        rsa_req->f_len = (cop->crk_param[6].crp_nbits + 7)/8;
+        buf = kzalloc(rsa_req->p_len + rsa_req->q_len + rsa_req->f_len +
+                      rsa_req->dp_len + rsa_req->dq_len + rsa_req->c_len +
+                      rsa_req->g_len, GFP_DMA);
+        if (!buf)
+                return -ENOMEM;
+        rsa_req->p = buf;
+        rsa_req->q = rsa_req->p + rsa_req->p_len;
+        rsa_req->g = rsa_req->q + rsa_req->q_len;
+        rsa_req->dp = rsa_req->g + rsa_req->g_len;
+        rsa_req->dq = rsa_req->dp + rsa_req->dp_len;
+        rsa_req->c = rsa_req->dq + rsa_req->dq_len;
+        rsa_req->f = rsa_req->c + rsa_req->c_len;
+        copy_from_user(rsa_req->p, cop->crk_param[0].crp_p, rsa_req->p_len);
+        copy_from_user(rsa_req->q, cop->crk_param[1].crp_p, rsa_req->q_len);
+        copy_from_user(rsa_req->g, cop->crk_param[2].crp_p, rsa_req->g_len);
+        copy_from_user(rsa_req->dp, cop->crk_param[3].crp_p, rsa_req->dp_len);
+        copy_from_user(rsa_req->dq, cop->crk_param[4].crp_p, rsa_req->dq_len);
+        copy_from_user(rsa_req->c, cop->crk_param[5].crp_p, rsa_req->c_len);
+        rc = cryptodev_pkc_offload(pkc);
+        if (pkc->type == SYNCHRONOUS) {
+                if (rc)
+                        goto err;
+                copy_to_user(cop->crk_param[6].crp_p, rsa_req->f,
+                             rsa_req->f_len);
+                kfree(buf);
+        } else {
+                if (rc != -EINPROGRESS && rc != 0)
+                        goto err;
+                /* buf is freed later by the fetch handler via pkc->cookie */
+                pkc->cookie = buf;
+        }
+        return rc;
+err:
+        kfree(buf);
+        return rc;
+}
+
+int crypto_bn_modexp(struct cryptodev_pkc *pkc)
+{
+        struct pkc_request *pkc_req;
+        struct rsa_pub_req_s *rsa_req;
+        struct kernel_crypt_kop *kop = &pkc->kop;
+        struct crypt_kop *cop = &kop->kop;
+        int rc;
+        uint8_t *buf;
+
+        if (!cop->crk_param[0].crp_nbits || !cop->crk_param[1].crp_nbits ||
+            !cop->crk_param[2].crp_nbits || !cop->crk_param[3].crp_nbits)
+                return -EINVAL;
+
+        pkc_req = &pkc->req;
+        pkc_req->type = RSA_PUB;
+        rsa_req = &pkc_req->req_u.rsa_pub_req;
+        rsa_req->f_len = (cop->crk_param[0].crp_nbits + 7)/8;
+        rsa_req->e_len = (cop->crk_param[1].crp_nbits + 7)/8;
+        rsa_req->n_len = (cop->crk_param[2].crp_nbits + 7)/8;
+        rsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
+        buf = kzalloc(rsa_req->f_len + rsa_req->e_len + rsa_req->n_len
+                      + rsa_req->g_len, GFP_DMA);
+        if (!buf)
+                return -ENOMEM;
+        rsa_req->e = buf;
+        rsa_req->f = rsa_req->e + rsa_req->e_len;
+        rsa_req->g = rsa_req->f + rsa_req->f_len;
+        rsa_req->n = rsa_req->g + rsa_req->g_len;
+        copy_from_user(rsa_req->f, cop->crk_param[0].crp_p, rsa_req->f_len);
+        copy_from_user(rsa_req->e, cop->crk_param[1].crp_p, rsa_req->e_len);
+        copy_from_user(rsa_req->n, cop->crk_param[2].crp_p, rsa_req->n_len);
+        rc = cryptodev_pkc_offload(pkc);
+        if (pkc->type == SYNCHRONOUS) {
+                if (rc)
+                        goto err;
+                copy_to_user(cop->crk_param[3].crp_p, rsa_req->g,
+                             rsa_req->g_len);
+                kfree(buf);
+        } else {
+                if (rc != -EINPROGRESS && rc != 0)
+                        goto err;
+                /* This one will be freed later in fetch handler */
+                pkc->cookie = buf;
+        }
+        return rc;
+err:
+        kfree(buf);
+        return rc;
+}
+
+int crypto_run_asym(struct cryptodev_pkc *pkc)
+{
+        int ret = -EINVAL;
+        struct kernel_crypt_kop *kop = &pkc->kop;
+
+        switch (kop->kop.crk_op) {
+        case CRK_MOD_EXP:
+                if (kop->kop.crk_iparams != 3 || kop->kop.crk_oparams != 1)
+                        break;
+                ret = crypto_bn_modexp(pkc);
+                break;
+        case CRK_MOD_EXP_CRT:
+                if (kop->kop.crk_iparams != 6 || kop->kop.crk_oparams != 1)
+                        break;
+                ret = crypto_modexp_crt(pkc);
+                break;
+        case CRK_DSA_SIGN:
+                if ((kop->kop.crk_iparams != 5 && kop->kop.crk_iparams != 6) ||
+                    kop->kop.crk_oparams != 2)
+                        break;
+                ret = crypto_kop_dsasign(pkc);
+                break;
+        case CRK_DSA_VERIFY:
+                if ((kop->kop.crk_iparams != 7 && kop->kop.crk_iparams != 8) ||
+                    kop->kop.crk_oparams != 0)
+                        break;
+                ret = crypto_kop_dsaverify(pkc);
+                break;
+        case CRK_DH_COMPUTE_KEY:
+                if ((kop->kop.crk_iparams != 3 && kop->kop.crk_iparams != 4) ||
+                    kop->kop.crk_oparams != 1)
+                        break;
+                ret = crypto_kop_dh_key(pkc);
+                break;
+        default:
+                break;
+        }
+        return ret;
+}
+
 int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
 {
         struct csession *ses_ptr;
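
All of the handlers above share one allocation idiom: each big-number parameter length is rounded up from bits to bytes with (crp_nbits + 7) / 8, a single DMA-able buffer is allocated for the sum, and the request's field pointers are carved out of it back to back before the user data is copied in. A condensed sketch of the idiom with hypothetical field names; unlike the handlers above it also checks the copy_from_user() return value, which is the idiomatic kernel form:

/* Sketch of the shared layout idiom (hypothetical two-field request). */
#include <linux/slab.h>
#include <linux/uaccess.h>

static int demo_pack(void __user *ua, int a_nbits,
                     void __user *ub, int b_nbits)
{
        size_t a_len = (a_nbits + 7) / 8;       /* round bits up to whole bytes */
        size_t b_len = (b_nbits + 7) / 8;
        u8 *buf, *a, *b;

        /* one contiguous buffer; GFP_KERNEL | GFP_DMA is the usual spelling,
         * while the patch itself passes bare GFP_DMA to kzalloc() */
        buf = kzalloc(a_len + b_len, GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;
        a = buf;                                /* fields laid out back to back */
        b = a + a_len;
        if (copy_from_user(a, ua, a_len) || copy_from_user(b, ub, b_len)) {
                kfree(buf);
                return -EFAULT;
        }
        /* ... hand buf off to the offload request; free it when done ... */
        kfree(buf);
        return 0;
}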