1 From 6213ae5228a2ff0bb3521474ae37effda95a5d46 Mon Sep 17 00:00:00 2001
2 From: Cristian Stoica <cristian.stoica@nxp.com>
3 Date: Fri, 12 May 2017 17:04:40 +0300
4 Subject: [PATCH 7/9] add support for RSA public and private key operations
6 Only form 1 support is added with this patch. To maintain
7 compatibility with OpenBSD we need to reverse bignum buffers before
8 giving them to the kernel. This adds an artificial performance
9 penalty that can be resolved only with a CIOCKEY extension in the
10 cryptodev API.
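(For illustration: a parameter that OpenBSD-style user space stores LSB
first, e.g. the bytes 0c 0b 0a for the value 0x0a0b0c, has to be flipped
to 0a 0b 0c before the kernel's MSB-first akcipher code can use it, and
the result has to be flipped back; reverse_buf() below does this.)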
12 As of Linux kernel 4.12 it is not possible to pass a pointer to an
13 RSA key structure directly to the kernel, so we must resort to a BER
14 encoding of the key.
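(Concretely, the public key is handed to crypto_akcipher_set_pub_key() as
a BER SEQUENCE containing the two INTEGERs n and e, which is the blob
cryptodev_alloc_rsa_pub_key() below assembles by hand.)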
16 Support for private keys in form 3 (CRT) must wait for updates and
17 fixes in the Linux kernel crypto API.
20 Kernels <= v4.7 strip leading zeros from the result, so we get padding
21 errors from OpenSSL: RSA_EAY_PUBLIC_DECRYPT: padding check failed
22 (fixed by kernel commit "crypto: rsa - Generate fixed-length output",
23 9b45b7bba3d22de52e09df63c50f390a193a3f53)
25 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
27 cryptlib.c | 234 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
29 cryptodev_int.h | 17 ++++
31 main.c | 42 ++++++++++
32 5 files changed, 312 insertions(+), 2 deletions(-)
34 diff --git a/cryptlib.c b/cryptlib.c
35 index 2c6028e..1c044a4 100644
39 #include <crypto/authenc.h>
40 #include "cryptodev_int.h"
41 #include "cipherapi.h"
42 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
43 +#include <linux/asn1_ber_bytecode.h>
44 +#include <crypto/akcipher.h>
47 extern const struct crypto_type crypto_givcipher_type;
49 @@ -435,3 +439,233 @@ int cryptodev_hash_final(struct hash_data *hdata, void *output)
50 return waitfor(&hdata->async.result, ret);
53 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
54 +/* This function is necessary because bignums in the Linux kernel are MSB first
55 + * (big endian), as opposed to the LSB-first order used by the OpenBSD crypto layer */
56 +void reverse_buf(uint8_t *buf, size_t sz)
64 + for (i = 0; i < sz/2; i++) {
75 +int ber_wr_tag(uint8_t **ber_ptr, uint8_t tag)
83 +int ber_wr_len(uint8_t **ber_ptr, size_t len, size_t sz)
89 + size_t sz_save = sz;
92 + **ber_ptr = 0x80 | sz;
95 + *(*ber_ptr + sz) = len & 0xff;
99 + *ber_ptr += sz_save;
105 +int ber_wr_int(uint8_t **ber_ptr, uint8_t *crp_p, size_t sz)
109 + ret = copy_from_user(*ber_ptr, crp_p, sz);
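+ /* crp_p comes from user space LSB first; flip it to the MSB-first order BER uses */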
110 + reverse_buf(*ber_ptr, sz);
117 +/* calculate the size of the length field itself in BER encoding */
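+/* (lengths up to 127 fit in a single octet; larger values take one prefix octet,
+ * 0x80 | n, followed by n octets holding the length itself, MSB first) */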
118 +size_t ber_enc_len(size_t len)
123 + if (len > 127) { /* long encoding */
133 +void *cryptodev_alloc_rsa_pub_key(struct kernel_crypt_pkop *pkop,
136 + struct crypt_kop *cop = &pkop->pkop;
139 + uint32_t ber_key_len;
148 + /* BER public key format:
149 + * SEQUENCE TAG 1 byte
150 + * SEQUENCE LENGTH s_enc_len bytes
151 + * INTEGER TAG 1 byte
152 + * INTEGER LENGTH n_enc_len bytes
153 + * INTEGER (n modulus) n_sz bytes
154 + * INTEGER TAG 1 byte
155 + * INTEGER LENGTH e_enc_len bytes
156 + * INTEGER (e exponent) e_sz bytes
159 + e_sz = (cop->crk_param[1].crp_nbits + 7)/8;
160 + n_sz = (cop->crk_param[2].crp_nbits + 7)/8;
162 + e_enc_len = ber_enc_len(e_sz);
163 + n_enc_len = ber_enc_len(n_sz);
166 + * Sequence length is the size of all the fields following the sequence
167 + * tag, added together. The two added bytes account for the two INT
168 + * tags in the Public Key sequence
170 + s_sz = e_sz + e_enc_len + n_sz + n_enc_len + 2;
171 + s_enc_len = ber_enc_len(s_sz);
173 + /* The added byte accounts for the SEQ tag at the start of the key */
174 + ber_key_len = s_sz + s_enc_len + 1;
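+ /* e.g. for a 2048-bit modulus with e = 65537: n_sz = 256, n_enc_len = 3,
+  * e_sz = 3, e_enc_len = 1, so s_sz = 265, s_enc_len = 3, ber_key_len = 269 */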
176 + /* Linux asn1_ber_decoder doesn't like keys that are too large */
177 + if (ber_key_len > 65535) {
181 + ber_key = kmalloc(ber_key_len, GFP_DMA);
188 + err = ber_wr_tag(&ber_ptr, _tag(UNIV, CONS, SEQ)) ||
189 + ber_wr_len(&ber_ptr, s_sz, s_enc_len) ||
190 + ber_wr_tag(&ber_ptr, _tag(UNIV, PRIM, INT)) ||
191 + ber_wr_len(&ber_ptr, n_sz, n_enc_len) ||
192 + ber_wr_int(&ber_ptr, cop->crk_param[2].crp_p, n_sz) ||
193 + ber_wr_tag(&ber_ptr, _tag(UNIV, PRIM, INT)) ||
194 + ber_wr_len(&ber_ptr, e_sz, e_enc_len) ||
195 + ber_wr_int(&ber_ptr, cop->crk_param[1].crp_p, e_sz);
200 + *key_len = ber_key_len;
208 +int crypto_bn_modexp(struct kernel_crypt_pkop *pkop)
210 + struct crypt_kop *cop = &pkop->pkop;
212 + uint32_t ber_key_len;
218 + struct scatterlist src;
219 + struct scatterlist dst;
222 + ber_key = cryptodev_alloc_rsa_pub_key(pkop, &ber_key_len);
227 + err = crypto_akcipher_set_pub_key(pkop->s, ber_key, ber_key_len);
232 + m_sz = (cop->crk_param[0].crp_nbits + 7)/8;
233 + c_sz = (cop->crk_param[3].crp_nbits + 7)/8;
235 + m_buf = kmalloc(m_sz, GFP_DMA);
241 + err = copy_from_user(m_buf, cop->crk_param[0].crp_p, m_sz);
245 + reverse_buf(m_buf, m_sz);
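+ /* crypto_akcipher_maxsize() is the largest output the transform can produce
+  * (for RSA, the size of the modulus) */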
247 + c_sz_max = crypto_akcipher_maxsize(pkop->s);
248 + if (c_sz > c_sz_max) {
253 + c_buf = kzalloc(c_sz_max, GFP_KERNEL);
258 + sg_init_one(&src, m_buf, m_sz);
259 + sg_init_one(&dst, c_buf, c_sz);
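+ /* single-entry sg lists: src holds the MSB-first input, dst receives the result */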
261 + init_completion(&pkop->result.completion);
262 + akcipher_request_set_callback(pkop->req, 0,
263 + cryptodev_complete, &pkop->result);
264 + akcipher_request_set_crypt(pkop->req, &src, &dst, m_sz, c_sz);
266 + err = crypto_akcipher_encrypt(pkop->req);
267 + err = waitfor(&pkop->result, err);
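+ /* the result comes back MSB first; flip it to the LSB-first order user space expects */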
270 + reverse_buf(c_buf, c_sz);
271 + err = copy_to_user(cop->crk_param[3].crp_p, c_buf, c_sz);
283 diff --git a/cryptlib.h b/cryptlib.h
284 index 48fe9bd..f909c34 100644
287 @@ -95,6 +95,8 @@ int cryptodev_hash_reset(struct hash_data *hdata);
288 void cryptodev_hash_deinit(struct hash_data *hdata);
289 int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
290 int hmac_mode, void *mackey, size_t mackeylen);
292 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
293 +int crypto_bn_modexp(struct kernel_crypt_pkop *pkop);
297 diff --git a/cryptodev_int.h b/cryptodev_int.h
298 index c1879fd..7860c39 100644
299 --- a/cryptodev_int.h
300 +++ b/cryptodev_int.h
302 #include <linux/scatterlist.h>
303 #include <crypto/cryptodev.h>
304 #include <crypto/aead.h>
305 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
306 +#include <crypto/internal/rsa.h>
310 #define PFX "cryptodev: "
311 #define dprintk(level, severity, format, a...) \
312 @@ -111,6 +115,18 @@ struct kernel_crypt_auth_op {
313 struct mm_struct *mm;
316 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
317 +struct kernel_crypt_pkop {
318 + struct crypt_kop pkop;
320 + struct crypto_akcipher *s; /* Transform pointer from CryptoAPI */
321 + struct akcipher_request *req; /* PKC request allocated from CryptoAPI */
322 + struct cryptodev_result result; /* updated by completion handler */
325 +int crypto_run_asym(struct kernel_crypt_pkop *pkop);
330 int kcaop_from_user(struct kernel_crypt_auth_op *kcop,
331 @@ -122,6 +138,7 @@ int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop);
333 #include <cryptlib.h>
336 /* other internal structs */
338 struct list_head entry;
339 diff --git a/ioctl.c b/ioctl.c
340 index db7207a..8b0df4e 100644
343 @@ -810,6 +810,9 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
344 struct session_op sop;
345 struct kernel_crypt_op kcop;
346 struct kernel_crypt_auth_op kcaop;
347 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
348 + struct kernel_crypt_pkop pkop;
350 struct crypt_priv *pcr = filp->private_data;
352 struct session_info_op siop;
353 @@ -823,7 +826,11 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
357 - return put_user(0, p);
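+ /* advertise asymmetric features only when the kernel provides an "rsa" akcipher */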
359 + if (crypto_has_alg("rsa", 0, 0)) {
362 + return put_user(ses, p);
365 ret = put_user(fd, p);
366 @@ -859,6 +866,14 @@ cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
369 return copy_to_user(arg, &siop, sizeof(siop));
370 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
372 + ret = copy_from_user(&pkop.pkop, arg, sizeof(struct crypt_kop));
374 + ret = crypto_run_asym(&pkop);
379 if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
380 dwarning(1, "Error copying from user");
381 diff --git a/main.c b/main.c
382 index 57e5c38..2bfe6f0 100644
387 #include "cryptlib.h"
389 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
390 +#include <crypto/akcipher.h>
393 /* This file contains the traditional operations of encryption
394 * and hashing of /dev/crypto.
395 @@ -265,3 +268,42 @@ out_unlock:
396 crypto_put_session(ses_ptr);
400 +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 3, 0))
401 +int crypto_run_asym(struct kernel_crypt_pkop *pkop)
405 + pkop->s = crypto_alloc_akcipher("rsa", 0, 0);
406 + if (IS_ERR(pkop->s)) {
407 + return PTR_ERR(pkop->s);
410 + pkop->req = akcipher_request_alloc(pkop->s, GFP_KERNEL);
411 + if (pkop->req == NULL) {
416 + switch (pkop->pkop.crk_op) {
417 + case CRK_MOD_EXP: /* RSA_PUB or PRIV form 1 */
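+ /* modular exponentiation takes 3 input params (base, exponent, modulus) and 1 output */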
418 + if (pkop->pkop.crk_iparams != 3 || pkop->pkop.crk_oparams != 1) {
422 + err = crypto_bn_modexp(pkop);
433 + crypto_free_akcipher(pkop->s);