1 From db9d8be9d0d81bdb2ddb78f8616243593a3d24c5 Mon Sep 17 00:00:00 2001
2 From: Pankaj Gupta <pankaj.gupta@nxp.com>
3 Date: Fri, 10 Jan 2020 15:38:38 +0530
4 Subject: [PATCH 2/2] eng_devcrypto: add support for TLS1.2 algorithms offload
6 - aes-128-cbc-hmac-sha256
7 - aes-256-cbc-hmac-sha256
9 Enable support for offloading the TLS1.1 algorithms:
11 - aes-128-cbc-hmac-sha1
12 - aes-256-cbc-hmac-sha1
14 Requires TLS patches on cryptodev and TLS algorithm support in Linux
17 Fix: Remove support for TLS1.0.
19 Signed-off-by: Pankaj Gupta <pankaj.gupta@nxp.com>
20 Signed-off-by: Arun Pathak <arun.pathak@nxp.com>
22 crypto/engine/eng_devcrypto.c | 133 +++++++++++++++++++++++-----------
23 1 file changed, 90 insertions(+), 43 deletions(-)
25 diff --git a/crypto/engine/eng_devcrypto.c b/crypto/engine/eng_devcrypto.c
26 index 727a660e75..be63f65e04 100644
27 --- a/crypto/engine/eng_devcrypto.c
28 +++ b/crypto/engine/eng_devcrypto.c
30 #include "crypto/engine.h"
32 /* #define ENGINE_DEVCRYPTO_DEBUG */
33 +#define TLS1_1_VERSION 0x0302
35 #if CRYPTO_ALGORITHM_MIN < CRYPTO_ALGORITHM_MAX
36 # define CHECK_BSD_STYLE_MACROS
37 @@ -67,6 +68,7 @@ struct cipher_ctx {
38 /* to handle ctr mode being a stream cipher */
39 unsigned char partial[EVP_MAX_BLOCK_LENGTH];
40 unsigned int blocksize, num;
41 + unsigned int tls_ver;
44 static const struct cipher_data_st {
45 @@ -92,11 +94,17 @@ static const struct cipher_data_st {
46 { NID_aes_192_cbc, 16, 192 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC, 0 },
47 { NID_aes_256_cbc, 16, 256 / 8, 16, EVP_CIPH_CBC_MODE, CRYPTO_AES_CBC, 0 },
48 { NID_aes_128_cbc_hmac_sha1, 16, 16, 16,
49 - EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
50 - CRYPTO_TLS10_AES_CBC_HMAC_SHA1, 20 },
51 + EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
52 + CRYPTO_TLS11_AES_CBC_HMAC_SHA1, 20 },
53 { NID_aes_256_cbc_hmac_sha1, 16, 32, 16,
54 - EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
55 - CRYPTO_TLS10_AES_CBC_HMAC_SHA1, 20 },
56 + EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
57 + CRYPTO_TLS11_AES_CBC_HMAC_SHA1, 20 },
58 + { NID_aes_128_cbc_hmac_sha256, 16, 16, 16,
59 + EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
60 + CRYPTO_TLS12_AES_CBC_HMAC_SHA256, 32 },
61 + { NID_aes_256_cbc_hmac_sha256, 16, 32, 16,
62 + EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_AEAD_CIPHER,
63 + CRYPTO_TLS12_AES_CBC_HMAC_SHA256, 32 },
64 #ifndef OPENSSL_NO_RC4
65 { NID_rc4, 1, 16, 0, EVP_CIPH_STREAM_CIPHER, CRYPTO_ARC4, 0 },
67 @@ -107,9 +115,9 @@ static const struct cipher_data_st {
69 #if 0 /* Not yet supported */
70 { NID_aes_128_xts, 16, 128 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS,
73 { NID_aes_256_xts, 16, 256 / 8 * 2, 16, EVP_CIPH_XTS_MODE, CRYPTO_AES_XTS,
77 #if !defined(CHECK_BSD_STYLE_MACROS) || defined(CRYPTO_AES_ECB)
78 { NID_aes_128_ecb, 16, 128 / 8, 0, EVP_CIPH_ECB_MODE, CRYPTO_AES_ECB, 0 },
79 @@ -166,7 +174,7 @@ static const struct cipher_data_st *get_cipher_data(int nid)
80 * with both the crypto and hmac keys.
82 static int cryptodev_init_aead_key(EVP_CIPHER_CTX *ctx,
83 - const unsigned char *key, const unsigned char *iv, int enc)
84 + const unsigned char *key, const unsigned char *iv, int enc)
86 struct cipher_ctx *state = EVP_CIPHER_CTX_get_cipher_data(ctx);
87 struct session_op *sess = &state->sess;
88 @@ -212,10 +220,29 @@ static int cryptodev_aead_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
90 memset(&cryp, 0, sizeof(cryp));
92 + if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
93 + if (!EVP_CIPHER_CTX_encrypting(ctx)) {
94 + iiv = in + len - EVP_CIPHER_CTX_iv_length(ctx);
95 + memcpy(save_iv, iiv, EVP_CIPHER_CTX_iv_length(ctx));
97 + if (state->tls_ver >= TLS1_1_VERSION) {
98 + memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), in,
99 + EVP_CIPHER_CTX_iv_length(ctx));
100 + in += EVP_CIPHER_CTX_iv_length(ctx);
101 + out += EVP_CIPHER_CTX_iv_length(ctx);
102 + len -= EVP_CIPHER_CTX_iv_length(ctx);
105 + cryp.iv = (void *) EVP_CIPHER_CTX_iv(ctx);
109 /* TODO: make a seamless integration with cryptodev flags */
110 switch (EVP_CIPHER_CTX_nid(ctx)) {
111 case NID_aes_128_cbc_hmac_sha1:
112 case NID_aes_256_cbc_hmac_sha1:
113 + case NID_aes_128_cbc_hmac_sha256:
114 + case NID_aes_256_cbc_hmac_sha256:
115 cryp.flags = COP_FLAG_AEAD_TLS_TYPE;
117 cryp.ses = sess->ses;
118 @@ -227,15 +254,6 @@ static int cryptodev_aead_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
120 cryp.op = EVP_CIPHER_CTX_encrypting(ctx) ? COP_ENCRYPT : COP_DECRYPT;
122 - if (EVP_CIPHER_CTX_iv_length(ctx) > 0) {
123 - cryp.iv = (void *) EVP_CIPHER_CTX_iv(ctx);
124 - if (!EVP_CIPHER_CTX_encrypting(ctx)) {
125 - iiv = in + len - EVP_CIPHER_CTX_iv_length(ctx);
126 - memcpy(save_iv, iiv, EVP_CIPHER_CTX_iv_length(ctx));
131 if (ioctl(cfd, CIOCAUTHCRYPT, &cryp) == -1) {
133 * XXX need better errror handling this can fail for a number of
134 @@ -262,7 +280,7 @@ static int cryptodev_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type,
136 case EVP_CTRL_AEAD_SET_MAC_KEY:
138 - /* TODO: what happens with hmac keys larger than 64 bytes? */
139 + /* TODO: what happens with hmac keys larger than 64 bytes? */
140 struct cipher_ctx *state =
141 EVP_CIPHER_CTX_get_cipher_data(ctx);
142 struct session_op *sess = &state->sess;
143 @@ -282,27 +300,52 @@ static int cryptodev_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type,
144 EVP_CIPHER_CTX_get_cipher_data(ctx);
145 unsigned char *p = ptr;
146 unsigned int cryptlen = p[arg - 2] << 8 | p[arg - 1];
147 - unsigned int maclen, padlen;
148 - unsigned int bs = EVP_CIPHER_CTX_block_size(ctx);
149 + unsigned int maclen;
150 + unsigned int blocksize = EVP_CIPHER_CTX_block_size(ctx);
153 + state->tls_ver = p[arg - 4] << 8 | p[arg - 3];
155 state->aad_len = arg;
156 - state->len = cryptlen;
158 /* TODO: this should be an extension of EVP_CIPHER struct */
159 switch (EVP_CIPHER_CTX_nid(ctx)) {
160 case NID_aes_128_cbc_hmac_sha1:
161 case NID_aes_256_cbc_hmac_sha1:
162 maclen = SHA_DIGEST_LENGTH;
164 + case NID_aes_128_cbc_hmac_sha256:
165 + case NID_aes_256_cbc_hmac_sha256:
166 + maclen = SHA256_DIGEST_LENGTH;
170 + * Only the 4 supported NIDs listed above can enter this
171 + * function. If any other NID reaches this point,
172 + * there's a grave coding error further down.
174 + assert("Code that never should be reached" == NULL);
178 /* space required for encryption (not only TLS padding) */
180 if (EVP_CIPHER_CTX_encrypting(ctx)) {
181 - cryptlen += maclen;
182 - padlen += bs - (cryptlen % bs);
183 + if (state->tls_ver >= TLS1_1_VERSION) {
184 + p[arg - 2] = (cryptlen - blocksize) >> 8;
185 + p[arg - 1] = (cryptlen - blocksize);
187 + ret = (int)(((cryptlen + maclen +
188 + blocksize) & -blocksize) - cryptlen);
190 + if (state->tls_ver >= TLS1_1_VERSION) {
191 + cryptlen -= blocksize;
192 + p[arg - 2] = cryptlen >> 8;
193 + p[arg - 1] = cryptlen;
198 + state->len = cryptlen;
203 @@ -510,11 +553,11 @@ static int cipher_cleanup(EVP_CIPHER_CTX *ctx)
204 static int known_cipher_nids[OSSL_NELEM(cipher_data)];
205 static int known_cipher_nids_amount = -1; /* -1 indicates not yet initialised */
206 static EVP_CIPHER *known_cipher_methods[OSSL_NELEM(cipher_data)] = { NULL, };
207 -int (*init) (EVP_CIPHER_CTX *ctx, const unsigned char *key,
208 - const unsigned char *iv, int enc);
209 -int (*do_cipher) (EVP_CIPHER_CTX *ctx, unsigned char *out,
210 - const unsigned char *in, size_t inl);
211 -int (*ctrl) (EVP_CIPHER_CTX *, int type, int arg, void *ptr);
212 +int (*init)(EVP_CIPHER_CTX *ctx, const unsigned char *key,
213 + const unsigned char *iv, int enc);
214 +int (*do_cipher)(EVP_CIPHER_CTX *ctx, unsigned char *out,
215 + const unsigned char *in, size_t inl);
216 +int (*ctrl)(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr);
218 static void prepare_cipher_methods(void)
220 @@ -543,26 +586,28 @@ static void prepare_cipher_methods(void)
222 sess.cipher = cipher_data[i].devcryptoid;
223 sess.keylen = cipher_data[i].keylen;
224 - sess.mackeylen = cipher_data[i].mackeylen;
225 + sess.mackeylen = cipher_data[i].mackeylen;
227 cipher_mode = cipher_data[i].flags & EVP_CIPH_MODE;
229 - do_cipher = (cipher_mode == EVP_CIPH_CTR_MODE ?
230 + do_cipher = (cipher_mode == EVP_CIPH_CTR_MODE ?
233 - if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
234 - || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1) {
235 - init = cryptodev_init_aead_key;
236 - do_cipher = cryptodev_aead_cipher;
237 - ctrl = cryptodev_cbc_hmac_sha1_ctrl;
238 - flags = cipher_data[i].flags;
240 + if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
241 + || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1
242 + || cipher_data[i].nid == NID_aes_128_cbc_hmac_sha256
243 + || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha256) {
244 + init = cryptodev_init_aead_key;
245 + do_cipher = cryptodev_aead_cipher;
246 + ctrl = cryptodev_cbc_hmac_sha1_ctrl;
247 + flags = cipher_data[i].flags;
250 if (ioctl(cfd, CIOCGSESSION, &sess) < 0
251 || ioctl(cfd, CIOCFSESSION, &sess.ses) < 0)
254 - if ((known_cipher_methods[i] =
255 + if ((known_cipher_methods[i] =
256 EVP_CIPHER_meth_new(cipher_data[i].nid,
257 cipher_mode == EVP_CIPH_CTR_MODE ? 1 :
258 cipher_data[i].blocksize,
259 @@ -574,7 +619,7 @@ static void prepare_cipher_methods(void)
260 || !EVP_CIPHER_meth_set_init(known_cipher_methods[i], init)
261 || !EVP_CIPHER_meth_set_do_cipher(known_cipher_methods[i],
263 - /* AEAD Support to be added. */
264 + /* AEAD Support to be added. */
265 || !EVP_CIPHER_meth_set_ctrl(known_cipher_methods[i], ctrl)
266 || !EVP_CIPHER_meth_set_cleanup(known_cipher_methods[i],
268 @@ -587,9 +632,11 @@ static void prepare_cipher_methods(void)
272 - if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
273 - || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1)
274 - EVP_add_cipher(known_cipher_methods[i]);
275 + if (cipher_data[i].nid == NID_aes_128_cbc_hmac_sha1
276 + || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha1
277 + || cipher_data[i].nid == NID_aes_128_cbc_hmac_sha256
278 + || cipher_data[i].nid == NID_aes_256_cbc_hmac_sha256)
279 + EVP_add_cipher(known_cipher_methods[i]);