From 78c01e1882def52c72966c0e86913950ec201af9 Mon Sep 17 00:00:00 2001
From: Yashpal Dutta <yashpal.dutta@freescale.com>
Date: Fri, 7 Mar 2014 08:49:15 +0545
Subject: [PATCH 07/15] Fix RCU stall in PKC asynchronous interface

Upstream-Status: Pending

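The stall could be triggered from the CIOCASYMFETCHCOOKIE path: the
completion-list spinlock was held, with interrupts disabled, across the
whole cookie loop, including the crypto_async_fetch_asym() calls that copy
each response back to user space. The loop now takes the bottom-half-safe
lock only around the list manipulation and drops it before a response is
copied out, and the completion callback uses the matching spin_lock_bh()
variant. Request and parameter buffers are also allocated with kmalloc()
instead of kzalloc().

A rough sketch of the resulting fetch pattern (simplified from the ioctl.c
hunks below; error handling and the cookie bookkeeping are omitted):

	for (i = 0; i < MAX_COOKIES; i++) {
		struct cryptodev_pkc *pkc;

		spin_lock_bh(&pcr->completion_lock);
		if (list_empty(&pcr->asym_completed_list)) {
			spin_unlock_bh(&pcr->completion_lock);
			break;
		}
		pkc = list_first_entry(&pcr->asym_completed_list,
				       struct cryptodev_pkc, list);
		list_del(&pkc->list);
		/* copy the response to user space without holding the lock */
		spin_unlock_bh(&pcr->completion_lock);
		ret = crypto_async_fetch_asym(pkc);
	}
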
Signed-off-by: Yashpal Dutta <yashpal.dutta@freescale.com>
---
 ioctl.c | 23 +++++++++++------------
 main.c  | 43 +++++++++++++++++++++++++++----------------
 2 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/ioctl.c b/ioctl.c
index 1f0741a..e4e16a8 100644
--- a/ioctl.c
+++ b/ioctl.c
@@ -108,10 +108,9 @@ void cryptodev_complete_asym(struct crypto_async_request *req, int err)
complete(&res->completion);
struct crypt_priv *pcr = pkc->priv;
- unsigned long flags;
- spin_lock_irqsave(&pcr->completion_lock, flags);
+ spin_lock_bh(&pcr->completion_lock);
list_add_tail(&pkc->list, &pcr->asym_completed_list);
- spin_unlock_irqrestore(&pcr->completion_lock, flags);
+ spin_unlock_bh(&pcr->completion_lock);
wake_up_interruptible(&pcr->user_waiter);
@@ -958,7 +957,7 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
return put_user(CRF_MOD_EXP_CRT | CRF_MOD_EXP | CRF_DSA_SIGN |
CRF_DSA_VERIFY | CRF_DH_COMPUTE_KEY |
- CRF_DSA_GENERATE_KEY, p);
+ CRF_DSA_GENERATE_KEY | CRF_DH_GENERATE_KEY, p);
ret = put_user(fd, p);
@@ -997,7 +996,7 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
struct cryptodev_pkc *pkc =
- kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+ kmalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
@@ -1053,7 +1052,7 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
case CIOCASYMASYNCRYPT:
struct cryptodev_pkc *pkc =
- kzalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
+ kmalloc(sizeof(struct cryptodev_pkc), GFP_KERNEL);
ret = kop_from_user(&pkc->kop, arg);
@@ -1070,13 +1069,12 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
case CIOCASYMFETCHCOOKIE:
struct cryptodev_pkc *pkc;
- unsigned long flags;
struct pkc_cookie_list_s cookie_list;
- spin_lock_irqsave(&pcr->completion_lock, flags);
cookie_list.cookie_available = 0;
for (i = 0; i < MAX_COOKIES; i++) {
+ spin_lock_bh(&pcr->completion_lock);
if (!list_empty(&pcr->asym_completed_list)) {
/* Run a loop in the list for upto elements
and copy their response back */
@@ -1084,6 +1082,7 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
list_first_entry(&pcr->asym_completed_list,
struct cryptodev_pkc, list);
+ spin_unlock_bh(&pcr->completion_lock);
ret = crypto_async_fetch_asym(pkc);
cookie_list.cookie_available++;
@@ -1093,10 +1092,10 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
+ spin_unlock_bh(&pcr->completion_lock);
- spin_unlock_irqrestore(&pcr->completion_lock, flags);
/* Reflect the updated request to user-space */
if (cookie_list.cookie_available)
@@ -1386,14 +1385,13 @@ cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
case COMPAT_CIOCASYMFETCHCOOKIE:
struct cryptodev_pkc *pkc;
- unsigned long flags;
struct compat_pkc_cookie_list_s cookie_list;
- spin_lock_irqsave(&pcr->completion_lock, flags);
cookie_list.cookie_available = 0;
for (i = 0; i < MAX_COOKIES; i++) {
+ spin_lock_bh(&pcr->completion_lock);
if (!list_empty(&pcr->asym_completed_list)) {
/* Run a loop in the list for upto elements
and copy their response back */
@@ -1401,6 +1399,7 @@ cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
list_first_entry(&pcr->asym_completed_list,
struct cryptodev_pkc, list);
list_del(&pkc->list);
+ spin_unlock_bh(&pcr->completion_lock);
ret = crypto_async_fetch_asym(pkc);
cookie_list.cookie_available++;
@@ -1409,10 +1408,10 @@ cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
+ spin_unlock_bh(&pcr->completion_lock);
- spin_unlock_irqrestore(&pcr->completion_lock, flags);
/* Reflect the updated request to user-space */
if (cookie_list.cookie_available)
diff --git a/main.c b/main.c
index c901bc7..2747706 100644
--- a/main.c
+++ b/main.c
@@ -215,7 +215,9 @@ int crypto_kop_dsasign(struct cryptodev_pkc *pkc)
pkc_req->type = DSA_SIGN;
- buf = kzalloc(buf_size, GFP_DMA);
+ buf = kmalloc(buf_size, GFP_DMA);
dsa_req->r = dsa_req->q + dsa_req->q_len;
@@ -298,7 +300,9 @@ int crypto_kop_dsaverify(struct cryptodev_pkc *pkc)
pkc_req->type = DSA_VERIFY;
- buf = kzalloc(buf_size, GFP_DMA);
+ buf = kmalloc(buf_size, GFP_DMA);
dsa_req->r = dsa_req->q + dsa_req->q_len;
@@ -378,7 +382,7 @@ int crypto_kop_keygen(struct cryptodev_pkc *pkc)
pkc_req->curve_type = cop->curve_type;
- buf = kzalloc(buf_size, GFP_DMA);
+ buf = kmalloc(buf_size, GFP_DMA);
@@ -390,25 +394,28 @@ int crypto_kop_keygen(struct cryptodev_pkc *pkc)
copy_from_user(key_req->q, cop->crk_param[0].crp_p, key_req->q_len);
copy_from_user(key_req->r, cop->crk_param[1].crp_p, key_req->r_len);
copy_from_user(key_req->g, cop->crk_param[2].crp_p, key_req->g_len);
- if (cop->crk_iparams == 3) {
- copy_from_user(key_req->pub_key, cop->crk_param[3].crp_p,
- key_req->pub_key_len);
- copy_from_user(key_req->priv_key, cop->crk_param[4].crp_p,
- key_req->priv_key_len);
+ if (cop->crk_iparams == 4) {
key_req->ab = key_req->priv_key + key_req->priv_key_len;
copy_from_user(key_req->ab, cop->crk_param[3].crp_p,
- copy_from_user(key_req->pub_key, cop->crk_param[4].crp_p,
- key_req->pub_key_len);
- copy_from_user(key_req->priv_key, cop->crk_param[5].crp_p,
- key_req->priv_key_len);
rc = cryptodev_pkc_offload(pkc);
if (pkc->type == SYNCHRONOUS) {
+ if (cop->crk_iparams == 4) {
+ copy_to_user(cop->crk_param[4].crp_p, key_req->pub_key,
+ key_req->pub_key_len);
+ copy_to_user(cop->crk_param[5].crp_p, key_req->priv_key,
+ key_req->priv_key_len);
+ copy_to_user(cop->crk_param[3].crp_p, key_req->pub_key,
+ key_req->pub_key_len);
+ copy_to_user(cop->crk_param[4].crp_p,
+ key_req->priv_key, key_req->priv_key_len);
if (rc != -EINPROGRESS && !rc)
@@ -447,7 +454,9 @@ int crypto_kop_dh_key(struct cryptodev_pkc *pkc)
pkc_req->type = DH_COMPUTE_KEY;
buf_size += dh_req->z_len;
- buf = kzalloc(buf_size, GFP_DMA);
+ buf = kmalloc(buf_size, GFP_DMA);
dh_req->s = dh_req->q + dh_req->q_len;
dh_req->pub_key = dh_req->s + dh_req->s_len;
@@ -508,9 +517,11 @@ int crypto_modexp_crt(struct cryptodev_pkc *pkc)
rsa_req->dq_len = (cop->crk_param[4].crp_nbits + 7)/8;
rsa_req->c_len = (cop->crk_param[5].crp_nbits + 7)/8;
rsa_req->f_len = (cop->crk_param[6].crp_nbits + 7)/8;
- buf = kzalloc(rsa_req->p_len + rsa_req->q_len + rsa_req->f_len +
+ buf = kmalloc(rsa_req->p_len + rsa_req->q_len + rsa_req->f_len +
rsa_req->dp_len + rsa_req->dp_len + rsa_req->c_len +
rsa_req->g_len, GFP_DMA);
rsa_req->q = rsa_req->p + rsa_req->p_len;
rsa_req->g = rsa_req->q + rsa_req->q_len;
@@ -563,7 +574,7 @@ int crypto_bn_modexp(struct cryptodev_pkc *pkc)
rsa_req->e_len = (cop->crk_param[1].crp_nbits + 7)/8;
rsa_req->n_len = (cop->crk_param[2].crp_nbits + 7)/8;
rsa_req->g_len = (cop->crk_param[3].crp_nbits + 7)/8;
- buf = kzalloc(rsa_req->f_len + rsa_req->e_len + rsa_req->n_len
+ buf = kmalloc(rsa_req->f_len + rsa_req->e_len + rsa_req->n_len
+ rsa_req->g_len, GFP_DMA);