xref: /illumos-gate/usr/src/uts/common/crypto/io/sha1_mod.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/modctl.h>
28 #include <sys/cmn_err.h>
29 #include <sys/note.h>
30 #include <sys/crypto/common.h>
31 #include <sys/crypto/spi.h>
32 #include <sys/strsun.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 
36 #include <sys/sha1.h>
37 
38 /*
39  * The sha1 module is created with two modlinkages:
40  * - a modlmisc that allows consumers to directly call the entry points
41  *   SHA1Init, SHA1Update, and SHA1Final.
42  * - a modlcrypto that allows the module to register with the Kernel
43  *   Cryptographic Framework (KCF) as a software provider for the SHA1
44  *   mechanisms.
45  */
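
/*
 * Editorial illustration (not part of the original file): a kernel
 * consumer that depends on misc/sha1 can call the exported entry points
 * directly.  A minimal sketch, where "buf" and "buflen" are assumed to
 * name the caller's data and its length in bytes:
 *
 *	SHA1_CTX ctx;
 *	uint8_t md[20];			(SHA1 digests are 20 bytes)
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, buf, buflen);
 *	SHA1Final(md, &ctx);
 */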
46 
47 static struct modlmisc modlmisc = {
48 	&mod_miscops,
49 	"SHA1 Message-Digest Algorithm"
50 };
51 
52 static struct modlcrypto modlcrypto = {
53 	&mod_cryptoops,
54 	"SHA1 Kernel SW Provider 1.1"
55 };
56 
57 static struct modlinkage modlinkage = {
58 	MODREV_1, &modlmisc, &modlcrypto, NULL
59 };
60 
61 /*
62  * CSPI information (entry points, provider info, etc.)
63  */
64 
65 typedef enum sha1_mech_type {
66 	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
67 	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
68 	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
69 } sha1_mech_type_t;
70 
71 #define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
72 #define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
73 #define	SHA1_HMAC_MIN_KEY_LEN	1	/* SHA1-HMAC min key length in bytes */
74 #define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bytes */
75 #define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
76 
77 /*
78  * Context for SHA1 mechanism.
79  */
80 typedef struct sha1_ctx {
81 	sha1_mech_type_t	sc_mech_type;	/* type of context */
82 	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
83 } sha1_ctx_t;
84 
85 /*
86  * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
87  */
88 typedef struct sha1_hmac_ctx {
89 	sha1_mech_type_t	hc_mech_type;	/* type of context */
90 	uint32_t		hc_digest_len;	/* digest len in bytes */
91 	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
92 	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
93 } sha1_hmac_ctx_t;
94 
95 /*
96  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
97  * by KCF to one of the entry points.
98  */
99 
100 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
101 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
102 
103 /* to extract the digest length passed as mechanism parameter */
104 #define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
105 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
106 		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
107 	else {								\
108 		ulong_t tmp_ulong;					\
109 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
110 		(len) = (uint32_t)tmp_ulong;				\
111 	}								\
112 }
113 
114 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
115 	SHA1Init(ctx);					\
116 	SHA1Update(ctx, key, len);			\
117 	SHA1Final(digest, ctx);				\
118 }
119 
120 /*
121  * Mechanism info structure passed to KCF during registration.
122  */
123 static crypto_mech_info_t sha1_mech_info_tab[] = {
124 	/* SHA1 */
125 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
126 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
127 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
128 	/* SHA1-HMAC */
129 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
130 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
131 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
132 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
133 	/* SHA1-HMAC GENERAL */
134 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
135 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
136 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
137 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
138 };
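
/*
 * Editorial illustration (not part of the original file): the mechanism
 * names in the table above are what a KCF consumer uses to reach this
 * provider.  A hedged sketch, assuming the usual crypto_mech2id(9F) and
 * crypto_digest(9F) consumer interfaces, with "in" and "out" already set
 * up as crypto_data_t buffers by the caller:
 *
 *	crypto_mechanism_t mech;
 *
 *	mech.cm_type = crypto_mech2id(SUN_CKM_SHA1);
 *	mech.cm_param = NULL;
 *	mech.cm_param_len = 0;
 *	(void) crypto_digest(&mech, &in, &out, NULL);
 */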
139 
140 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
141 
142 static crypto_control_ops_t sha1_control_ops = {
143 	sha1_provider_status
144 };
145 
146 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
147     crypto_req_handle_t);
148 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
149     crypto_req_handle_t);
150 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
151     crypto_req_handle_t);
152 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
153     crypto_req_handle_t);
154 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
155     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
156     crypto_req_handle_t);
157 
158 static crypto_digest_ops_t sha1_digest_ops = {
159 	sha1_digest_init,
160 	sha1_digest,
161 	sha1_digest_update,
162 	NULL,
163 	sha1_digest_final,
164 	sha1_digest_atomic
165 };
166 
167 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
168     crypto_spi_ctx_template_t, crypto_req_handle_t);
169 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
170     crypto_req_handle_t);
171 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
172 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
173     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
174     crypto_spi_ctx_template_t, crypto_req_handle_t);
175 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
176     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
177     crypto_spi_ctx_template_t, crypto_req_handle_t);
178 
179 static crypto_mac_ops_t sha1_mac_ops = {
180 	sha1_mac_init,
181 	NULL,
182 	sha1_mac_update,
183 	sha1_mac_final,
184 	sha1_mac_atomic,
185 	sha1_mac_verify_atomic
186 };
187 
188 static int sha1_create_ctx_template(crypto_provider_handle_t,
189     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
190     size_t *, crypto_req_handle_t);
191 static int sha1_free_context(crypto_ctx_t *);
192 
193 static crypto_ctx_ops_t sha1_ctx_ops = {
194 	sha1_create_ctx_template,
195 	sha1_free_context
196 };
197 
198 static crypto_ops_t sha1_crypto_ops = {
199 	&sha1_control_ops,
200 	&sha1_digest_ops,
201 	NULL,
202 	&sha1_mac_ops,
203 	NULL,
204 	NULL,
205 	NULL,
206 	NULL,
207 	NULL,
208 	NULL,
209 	NULL,
210 	NULL,
211 	NULL,
212 	&sha1_ctx_ops
213 };
214 
215 static crypto_provider_info_t sha1_prov_info = {
216 	CRYPTO_SPI_VERSION_1,
217 	"SHA1 Software Provider",
218 	CRYPTO_SW_PROVIDER,
219 	{&modlinkage},
220 	NULL,
221 	&sha1_crypto_ops,
222 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
223 	sha1_mech_info_tab
224 };
225 
226 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
227 
228 int
229 _init()
230 {
231 	int ret;
232 
233 	if ((ret = mod_install(&modlinkage)) != 0)
234 		return (ret);
235 
236 	/*
237 	 * Register with KCF. If the registration fails, log an
238 	 * error but do not uninstall the module, since the functionality
239 	 * provided by misc/sha1 should still be available.
240 	 */
241 	if ((ret = crypto_register_provider(&sha1_prov_info,
242 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
243 		cmn_err(CE_WARN, "sha1 _init: "
244 		    "crypto_register_provider() failed (0x%x)", ret);
245 
246 	return (0);
247 }
248 
249 int
250 _info(struct modinfo *modinfop)
251 {
252 	return (mod_info(&modlinkage, modinfop));
253 }
254 
255 /*
256  * KCF software provider control entry points.
257  */
258 /* ARGSUSED */
259 static void
260 sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
261 {
262 	*status = CRYPTO_PROVIDER_READY;
263 }
264 
265 /*
266  * KCF software provider digest entry points.
267  */
268 
269 static int
270 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
271     crypto_req_handle_t req)
272 {
273 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
274 		return (CRYPTO_MECHANISM_INVALID);
275 
276 	/*
277 	 * Allocate and initialize SHA1 context.
278 	 */
279 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
280 	    crypto_kmflag(req));
281 	if (ctx->cc_provider_private == NULL)
282 		return (CRYPTO_HOST_MEMORY);
283 
284 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
285 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
286 
287 	return (CRYPTO_SUCCESS);
288 }
289 
290 /*
291  * Helper SHA1 digest update function for uio data.
292  */
293 static int
294 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
295 {
296 	off_t offset = data->cd_offset;
297 	size_t length = data->cd_length;
298 	uint_t vec_idx;
299 	size_t cur_len;
300 
301 	/* we only support kernel buffers */
302 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
303 		return (CRYPTO_ARGUMENTS_BAD);
304 
305 	/*
306 	 * Jump to the first iovec containing data to be
307 	 * digested.
308 	 */
309 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
310 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
311 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
312 		;
313 	if (vec_idx == data->cd_uio->uio_iovcnt) {
314 		/*
315 		 * The caller specified an offset that is larger than the
316 		 * total size of the buffers it provided.
317 		 */
318 		return (CRYPTO_DATA_LEN_RANGE);
319 	}
320 
321 	/*
322 	 * Now do the digesting on the iovecs.
323 	 */
324 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
325 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
326 		    offset, length);
327 
328 		SHA1Update(sha1_ctx,
329 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
330 		    cur_len);
331 
332 		length -= cur_len;
333 		vec_idx++;
334 		offset = 0;
335 	}
336 
337 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
338 		/*
339 		 * The end of the specified iovecs was reached but the
340 		 * requested length could not be processed, i.e., the
341 		 * caller requested to digest more data than it provided.
342 		 */
343 		return (CRYPTO_DATA_LEN_RANGE);
344 	}
345 
346 	return (CRYPTO_SUCCESS);
347 }
348 
349 /*
350  * Helper SHA1 digest final function for uio data.
351  * digest_len is the length of the desired digest. If digest_len
352  * is smaller than the default SHA1 digest length, the caller
353  * must pass a scratch buffer, digest_scratch, which must
354  * be at least SHA1_DIGEST_LENGTH bytes.
355  */
356 static int
357 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
358     ulong_t digest_len, uchar_t *digest_scratch)
359 {
360 	off_t offset = digest->cd_offset;
361 	uint_t vec_idx;
362 
363 	/* we only support kernel buffers */
364 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
365 		return (CRYPTO_ARGUMENTS_BAD);
366 
367 	/*
368 	 * Jump to the first iovec containing ptr to the digest to
369 	 * be returned.
370 	 */
371 	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
372 	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
373 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
374 		;
375 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
376 		/*
377 		 * The caller specified an offset that is
378 		 * larger than the total size of the buffers
379 		 * it provided.
380 		 */
381 		return (CRYPTO_DATA_LEN_RANGE);
382 	}
383 
384 	if (offset + digest_len <=
385 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
386 		/*
387 		 * The computed SHA1 digest will fit in the current
388 		 * iovec.
389 		 */
390 		if (digest_len != SHA1_DIGEST_LENGTH) {
391 			/*
392 			 * The caller requested a short digest. Digest
393 			 * into a scratch buffer and return to
394 			 * the user only what was requested.
395 			 */
396 			SHA1Final(digest_scratch, sha1_ctx);
397 			bcopy(digest_scratch, (uchar_t *)digest->
398 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
399 			    digest_len);
400 		} else {
401 			SHA1Final((uchar_t *)digest->
402 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
403 			    sha1_ctx);
404 		}
405 	} else {
406 		/*
407 		 * The computed digest will cross one or more iovecs.
408 		 * This is bad performance-wise but we need to support it.
409 		 * Allocate a small scratch buffer on the stack and
410 		 * copy it piecemeal to the specified digest iovecs.
411 		 */
412 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
413 		off_t scratch_offset = 0;
414 		size_t length = digest_len;
415 		size_t cur_len;
416 
417 		SHA1Final(digest_tmp, sha1_ctx);
418 
419 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
420 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
421 			    offset, length);
422 			bcopy(digest_tmp + scratch_offset,
423 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
424 			    cur_len);
425 
426 			length -= cur_len;
427 			vec_idx++;
428 			scratch_offset += cur_len;
429 			offset = 0;
430 		}
431 
432 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
433 			/*
434 			 * The end of the specified iovecs was reached but
435 			 * the requested length could not be processed, i.e.,
436 			 * the caller requested to digest more data than it
437 			 * provided.
438 			 */
439 			return (CRYPTO_DATA_LEN_RANGE);
440 		}
441 	}
442 
443 	return (CRYPTO_SUCCESS);
444 }
445 
446 /*
447  * Helper SHA1 digest update for mblks.
448  */
449 static int
450 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
451 {
452 	off_t offset = data->cd_offset;
453 	size_t length = data->cd_length;
454 	mblk_t *mp;
455 	size_t cur_len;
456 
457 	/*
458 	 * Jump to the first mblk_t containing data to be digested.
459 	 */
460 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
461 	    offset -= MBLKL(mp), mp = mp->b_cont)
462 		;
463 	if (mp == NULL) {
464 		/*
465 		 * The caller specified an offset that is larger than the
466 		 * total size of the buffers it provided.
467 		 */
468 		return (CRYPTO_DATA_LEN_RANGE);
469 	}
470 
471 	/*
472 	 * Now do the digesting on the mblk chain.
473 	 */
474 	while (mp != NULL && length > 0) {
475 		cur_len = MIN(MBLKL(mp) - offset, length);
476 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
477 		length -= cur_len;
478 		offset = 0;
479 		mp = mp->b_cont;
480 	}
481 
482 	if (mp == NULL && length > 0) {
483 		/*
484 		 * The end of the mblk chain was reached but the requested
485 		 * length could not be processed, i.e., the caller requested
486 		 * to digest more data than it provided.
487 		 */
488 		return (CRYPTO_DATA_LEN_RANGE);
489 	}
490 
491 	return (CRYPTO_SUCCESS);
492 }
493 
494 /*
495  * Helper SHA1 digest final for mblks.
496  * digest_len is the length of the desired digest. If digest_len
497  * is smaller than the default SHA1 digest length, the caller
498  * must pass a scratch buffer, digest_scratch, which must
499  * be at least SHA1_DIGEST_LENGTH bytes.
500  */
501 static int
502 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
503     ulong_t digest_len, uchar_t *digest_scratch)
504 {
505 	off_t offset = digest->cd_offset;
506 	mblk_t *mp;
507 
508 	/*
509 	 * Jump to the first mblk_t that will be used to store the digest.
510 	 */
511 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
512 	    offset -= MBLKL(mp), mp = mp->b_cont)
513 		;
514 	if (mp == NULL) {
515 		/*
516 		 * The caller specified an offset that is larger than the
517 		 * total size of the buffers it provided.
518 		 */
519 		return (CRYPTO_DATA_LEN_RANGE);
520 	}
521 
522 	if (offset + digest_len <= MBLKL(mp)) {
523 		/*
524 		 * The computed SHA1 digest will fit in the current mblk.
525 		 * Do the SHA1Final() in-place.
526 		 */
527 		if (digest_len != SHA1_DIGEST_LENGTH) {
528 			/*
529 			 * The caller requested a short digest. Digest
530 			 * into a scratch buffer and return to
531 			 * the user only what was requested.
532 			 */
533 			SHA1Final(digest_scratch, sha1_ctx);
534 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
535 		} else {
536 			SHA1Final(mp->b_rptr + offset, sha1_ctx);
537 		}
538 	} else {
539 		/*
540 		 * The computed digest will cross one or more mblks.
541 		 * This is bad performance-wise but we need to support it.
542 		 * Allocate a small scratch buffer on the stack and
543 		 * copy it piecemeal to the specified digest mblks.
544 		 */
545 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
546 		off_t scratch_offset = 0;
547 		size_t length = digest_len;
548 		size_t cur_len;
549 
550 		SHA1Final(digest_tmp, sha1_ctx);
551 
552 		while (mp != NULL && length > 0) {
553 			cur_len = MIN(MBLKL(mp) - offset, length);
554 			bcopy(digest_tmp + scratch_offset,
555 			    mp->b_rptr + offset, cur_len);
556 
557 			length -= cur_len;
558 			mp = mp->b_cont;
559 			scratch_offset += cur_len;
560 			offset = 0;
561 		}
562 
563 		if (mp == NULL && length > 0) {
564 			/*
565 			 * The end of the specified mblk chain was reached
566 			 * but the requested length could not be processed,
567 			 * i.e., the caller requested to digest more data
568 			 * than it provided.
569 			 */
570 			return (CRYPTO_DATA_LEN_RANGE);
571 		}
572 	}
573 
574 	return (CRYPTO_SUCCESS);
575 }
576 
577 /* ARGSUSED */
578 static int
579 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
580     crypto_req_handle_t req)
581 {
582 	int ret = CRYPTO_SUCCESS;
583 
584 	ASSERT(ctx->cc_provider_private != NULL);
585 
586 	/*
587 	 * We just need to return the length needed to store the output.
588 	 * We should not destroy the context in these cases.
589 	 */
590 	if ((digest->cd_length == 0) ||
591 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
592 		digest->cd_length = SHA1_DIGEST_LENGTH;
593 		return (CRYPTO_BUFFER_TOO_SMALL);
594 	}
595 
596 	/*
597 	 * Do the SHA1 update on the specified input data.
598 	 */
599 	switch (data->cd_format) {
600 	case CRYPTO_DATA_RAW:
601 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
602 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
603 		    data->cd_length);
604 		break;
605 	case CRYPTO_DATA_UIO:
606 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
607 		    data);
608 		break;
609 	case CRYPTO_DATA_MBLK:
610 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
611 		    data);
612 		break;
613 	default:
614 		ret = CRYPTO_ARGUMENTS_BAD;
615 	}
616 
617 	if (ret != CRYPTO_SUCCESS) {
618 		/* the update failed, free context and bail */
619 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
620 		ctx->cc_provider_private = NULL;
621 		digest->cd_length = 0;
622 		return (ret);
623 	}
624 
625 	/*
626 	 * Do a SHA1 final; this must be done separately since the digest
627 	 * type can be different from the input data type.
628 	 */
629 	switch (digest->cd_format) {
630 	case CRYPTO_DATA_RAW:
631 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
632 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
633 		break;
634 	case CRYPTO_DATA_UIO:
635 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
636 		    digest, SHA1_DIGEST_LENGTH, NULL);
637 		break;
638 	case CRYPTO_DATA_MBLK:
639 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
640 		    digest, SHA1_DIGEST_LENGTH, NULL);
641 		break;
642 	default:
643 		ret = CRYPTO_ARGUMENTS_BAD;
644 	}
645 
646 	/* all done, free context and return */
647 
648 	if (ret == CRYPTO_SUCCESS) {
649 		digest->cd_length = SHA1_DIGEST_LENGTH;
650 	} else {
651 		digest->cd_length = 0;
652 	}
653 
654 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
655 	ctx->cc_provider_private = NULL;
656 	return (ret);
657 }
658 
659 /* ARGSUSED */
660 static int
661 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
662     crypto_req_handle_t req)
663 {
664 	int ret = CRYPTO_SUCCESS;
665 
666 	ASSERT(ctx->cc_provider_private != NULL);
667 
668 	/*
669 	 * Do the SHA1 update on the specified input data.
670 	 */
671 	switch (data->cd_format) {
672 	case CRYPTO_DATA_RAW:
673 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
674 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
675 		    data->cd_length);
676 		break;
677 	case CRYPTO_DATA_UIO:
678 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
679 		    data);
680 		break;
681 	case CRYPTO_DATA_MBLK:
682 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
683 		    data);
684 		break;
685 	default:
686 		ret = CRYPTO_ARGUMENTS_BAD;
687 	}
688 
689 	return (ret);
690 }
691 
692 /* ARGSUSED */
693 static int
694 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
695     crypto_req_handle_t req)
696 {
697 	int ret = CRYPTO_SUCCESS;
698 
699 	ASSERT(ctx->cc_provider_private != NULL);
700 
701 	/*
702 	 * We just need to return the length needed to store the output.
703 	 * We should not destroy the context in these cases.
704 	 */
705 	if ((digest->cd_length == 0) ||
706 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
707 		digest->cd_length = SHA1_DIGEST_LENGTH;
708 		return (CRYPTO_BUFFER_TOO_SMALL);
709 	}
710 
711 	/*
712 	 * Do a SHA1 final.
713 	 */
714 	switch (digest->cd_format) {
715 	case CRYPTO_DATA_RAW:
716 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
717 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
718 		break;
719 	case CRYPTO_DATA_UIO:
720 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
721 		    digest, SHA1_DIGEST_LENGTH, NULL);
722 		break;
723 	case CRYPTO_DATA_MBLK:
724 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
725 		    digest, SHA1_DIGEST_LENGTH, NULL);
726 		break;
727 	default:
728 		ret = CRYPTO_ARGUMENTS_BAD;
729 	}
730 
731 	/* all done, free context and return */
732 
733 	if (ret == CRYPTO_SUCCESS) {
734 		digest->cd_length = SHA1_DIGEST_LENGTH;
735 	} else {
736 		digest->cd_length = 0;
737 	}
738 
739 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
740 	ctx->cc_provider_private = NULL;
741 
742 	return (ret);
743 }
744 
745 /* ARGSUSED */
746 static int
747 sha1_digest_atomic(crypto_provider_handle_t provider,
748     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
749     crypto_data_t *data, crypto_data_t *digest,
750     crypto_req_handle_t req)
751 {
752 	int ret = CRYPTO_SUCCESS;
753 	SHA1_CTX sha1_ctx;
754 
755 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
756 		return (CRYPTO_MECHANISM_INVALID);
757 
758 	/*
759 	 * Do the SHA1 init.
760 	 */
761 	SHA1Init(&sha1_ctx);
762 
763 	/*
764 	 * Do the SHA1 update on the specified input data.
765 	 */
766 	switch (data->cd_format) {
767 	case CRYPTO_DATA_RAW:
768 		SHA1Update(&sha1_ctx,
769 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
770 		    data->cd_length);
771 		break;
772 	case CRYPTO_DATA_UIO:
773 		ret = sha1_digest_update_uio(&sha1_ctx, data);
774 		break;
775 	case CRYPTO_DATA_MBLK:
776 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
777 		break;
778 	default:
779 		ret = CRYPTO_ARGUMENTS_BAD;
780 	}
781 
782 	if (ret != CRYPTO_SUCCESS) {
783 		/* the update failed, bail */
784 		digest->cd_length = 0;
785 		return (ret);
786 	}
787 
788 	/*
789 	 * Do a SHA1 final; this must be done separately since the digest
790 	 * type can be different from the input data type.
791 	 */
792 	switch (digest->cd_format) {
793 	case CRYPTO_DATA_RAW:
794 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
795 		    digest->cd_offset, &sha1_ctx);
796 		break;
797 	case CRYPTO_DATA_UIO:
798 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
799 		    SHA1_DIGEST_LENGTH, NULL);
800 		break;
801 	case CRYPTO_DATA_MBLK:
802 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
803 		    SHA1_DIGEST_LENGTH, NULL);
804 		break;
805 	default:
806 		ret = CRYPTO_ARGUMENTS_BAD;
807 	}
808 
809 	if (ret == CRYPTO_SUCCESS) {
810 		digest->cd_length = SHA1_DIGEST_LENGTH;
811 	} else {
812 		digest->cd_length = 0;
813 	}
814 
815 	return (ret);
816 }
817 
818 /*
819  * KCF software provider mac entry points.
820  *
821  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
822  *
823  * Init:
824  * The initialization routine initializes what we denote
825  * as the inner and outer contexts by doing
826  * - for inner context: SHA1(key XOR ipad)
827  * - for outer context: SHA1(key XOR opad)
828  *
829  * Update:
830  * Each subsequent SHA1 HMAC update will result in an
831  * update of the inner context with the specified data.
832  *
833  * Final:
834  * The SHA1 HMAC final will do a SHA1 final operation on the
835  * inner context, and the resulting digest will be used
836  * as the data for an update on the outer context. Last
837  * but not least, a SHA1 final on the outer context will
838  * be performed to obtain the SHA1 HMAC digest to return
839  * to the user.
840  */
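
/*
 * Editorial illustration (not part of the original file): for a key that
 * does not exceed the 64-byte block size, the construction implemented
 * below amounts to (with the key zero-padded to 64 bytes):
 *
 *	k_ipad[i] = key[i] ^ 0x36	(for each of the 64 bytes)
 *	k_opad[i] = key[i] ^ 0x5c
 *
 *	inner = SHA1(k_ipad || text)	(text fed in via the updates)
 *	mac   = SHA1(k_opad || inner)
 *
 * Keys longer than 64 bytes are first replaced by SHA1(key), as done by
 * PROV_SHA1_DIGEST_KEY() in the init and atomic entry points below.
 */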
841 
842 /*
843  * Initialize a SHA1-HMAC context.
844  */
845 static void
846 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
847 {
848 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
849 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
850 	uint_t i;
851 
852 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
853 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
854 
855 	bcopy(keyval, ipad, length_in_bytes);
856 	bcopy(keyval, opad, length_in_bytes);
857 
858 	/* XOR key with ipad (0x36) and opad (0x5c) */
859 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
860 		ipad[i] ^= 0x36363636;
861 		opad[i] ^= 0x5c5c5c5c;
862 	}
863 
864 	/* perform SHA1 on ipad */
865 	SHA1Init(&ctx->hc_icontext);
866 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
867 
868 	/* perform SHA1 on opad */
869 	SHA1Init(&ctx->hc_ocontext);
870 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
871 }
872 
873 /* KCF mac_init entry point for the SHA1-HMAC mechanisms */
875 static int
876 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
877     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
878     crypto_req_handle_t req)
879 {
880 	int ret = CRYPTO_SUCCESS;
881 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
882 
883 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
884 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
885 		return (CRYPTO_MECHANISM_INVALID);
886 
887 	/* Add support for key by attributes (RFE 4706552) */
888 	if (key->ck_format != CRYPTO_KEY_RAW)
889 		return (CRYPTO_ARGUMENTS_BAD);
890 
891 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
892 	    crypto_kmflag(req));
893 	if (ctx->cc_provider_private == NULL)
894 		return (CRYPTO_HOST_MEMORY);
895 
896 	if (ctx_template != NULL) {
897 		/* reuse context template */
898 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
899 		    sizeof (sha1_hmac_ctx_t));
900 	} else {
901 		/* no context template, compute context */
902 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
903 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
904 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
905 
906 			/*
907 			 * Hash the passed-in key to get a smaller key.
908 			 * The inner context is used since it hasn't been
909 			 * initialized yet.
910 			 */
911 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
912 			    key->ck_data, keylen_in_bytes, digested_key);
913 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
914 			    digested_key, SHA1_DIGEST_LENGTH);
915 		} else {
916 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
917 			    key->ck_data, keylen_in_bytes);
918 		}
919 	}
920 
921 	/*
922 	 * Get the mechanism parameters, if applicable.
923 	 */
924 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
925 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
926 		if (mechanism->cm_param == NULL ||
927 		    mechanism->cm_param_len != sizeof (ulong_t)) {
928 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
929 		} else {
930 			PROV_SHA1_GET_DIGEST_LEN(mechanism,
931 			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
932 			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
933 			    SHA1_DIGEST_LENGTH)
934 				ret = CRYPTO_MECHANISM_PARAM_INVALID;
935 		}
934 	}
935 
936 	if (ret != CRYPTO_SUCCESS) {
937 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
938 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
939 		ctx->cc_provider_private = NULL;
940 	}
941 
942 	return (ret);
943 }
944 
945 /* ARGSUSED */
946 static int
947 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
948 {
949 	int ret = CRYPTO_SUCCESS;
950 
951 	ASSERT(ctx->cc_provider_private != NULL);
952 
953 	/*
954 	 * Do a SHA1 update of the inner context using the specified
955 	 * data.
956 	 */
957 	switch (data->cd_format) {
958 	case CRYPTO_DATA_RAW:
959 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
960 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
961 		    data->cd_length);
962 		break;
963 	case CRYPTO_DATA_UIO:
964 		ret = sha1_digest_update_uio(
965 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
966 		break;
967 	case CRYPTO_DATA_MBLK:
968 		ret = sha1_digest_update_mblk(
969 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
970 		break;
971 	default:
972 		ret = CRYPTO_ARGUMENTS_BAD;
973 	}
974 
975 	return (ret);
976 }
977 
978 /* ARGSUSED */
979 static int
980 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
981 {
982 	int ret = CRYPTO_SUCCESS;
983 	uchar_t digest[SHA1_DIGEST_LENGTH];
984 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
985 
986 	ASSERT(ctx->cc_provider_private != NULL);
987 
988 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
989 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
990 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
991 
992 	/*
993 	 * We just need to return the length needed to store the output.
994 	 * We should not destroy the context in these cases.
995 	 */
996 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
997 		mac->cd_length = digest_len;
998 		return (CRYPTO_BUFFER_TOO_SMALL);
999 	}
1000 
1001 	/*
1002 	 * Do a SHA1 final on the inner context.
1003 	 */
1004 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
1005 
1006 	/*
1007 	 * Do a SHA1 update on the outer context, feeding the inner
1008 	 * digest as data.
1009 	 */
1010 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
1011 	    SHA1_DIGEST_LENGTH);
1012 
1013 	/*
1014 	 * Do a SHA1 final on the outer context, storing the computed
1015 	 * digest in the user's buffer.
1016 	 */
1017 	switch (mac->cd_format) {
1018 	case CRYPTO_DATA_RAW:
1019 		if (digest_len != SHA1_DIGEST_LENGTH) {
1020 			/*
1021 			 * The caller requested a short digest. Digest
1022 			 * into a scratch buffer and return to
1023 			 * the user only what was requested.
1024 			 */
1025 			SHA1Final(digest,
1026 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1027 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1028 			    mac->cd_offset, digest_len);
1029 		} else {
1030 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1031 			    mac->cd_offset,
1032 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1033 		}
1034 		break;
1035 	case CRYPTO_DATA_UIO:
1036 		ret = sha1_digest_final_uio(
1037 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1038 		    digest_len, digest);
1039 		break;
1040 	case CRYPTO_DATA_MBLK:
1041 		ret = sha1_digest_final_mblk(
1042 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1043 		    digest_len, digest);
1044 		break;
1045 	default:
1046 		ret = CRYPTO_ARGUMENTS_BAD;
1047 	}
1048 
1049 	if (ret == CRYPTO_SUCCESS) {
1050 		mac->cd_length = digest_len;
1051 	} else {
1052 		mac->cd_length = 0;
1053 	}
1054 
1055 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1056 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1057 	ctx->cc_provider_private = NULL;
1058 
1059 	return (ret);
1060 }
1061 
1062 #define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
1063 	switch (data->cd_format) {					\
1064 	case CRYPTO_DATA_RAW:						\
1065 		SHA1Update(&(ctx).hc_icontext,				\
1066 		    (uint8_t *)data->cd_raw.iov_base +			\
1067 		    data->cd_offset, data->cd_length);			\
1068 		break;							\
1069 	case CRYPTO_DATA_UIO:						\
1070 		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1071 		break;							\
1072 	case CRYPTO_DATA_MBLK:						\
1073 		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
1074 		    data);						\
1075 		break;							\
1076 	default:							\
1077 		ret = CRYPTO_ARGUMENTS_BAD;				\
1078 	}								\
1079 }
1080 
1081 /* ARGSUSED */
1082 static int
1083 sha1_mac_atomic(crypto_provider_handle_t provider,
1084     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1085     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1086     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1087 {
1088 	int ret = CRYPTO_SUCCESS;
1089 	uchar_t digest[SHA1_DIGEST_LENGTH];
1090 	sha1_hmac_ctx_t sha1_hmac_ctx;
1091 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1092 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1093 
1094 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1095 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1096 		return (CRYPTO_MECHANISM_INVALID);
1097 
1098 	/* Add support for key by attributes (RFE 4706552) */
1099 	if (key->ck_format != CRYPTO_KEY_RAW)
1100 		return (CRYPTO_ARGUMENTS_BAD);
1101 
1102 	if (ctx_template != NULL) {
1103 		/* reuse context template */
1104 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1105 	} else {
1106 		/* no context template, initialize context */
1107 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1108 			/*
1109 			 * Hash the passed-in key to get a smaller key.
1110 			 * The inner context is used since it hasn't been
1111 			 * initialized yet.
1112 			 */
1113 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1114 			    key->ck_data, keylen_in_bytes, digest);
1115 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1116 			    SHA1_DIGEST_LENGTH);
1117 		} else {
1118 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1119 			    keylen_in_bytes);
1120 		}
1121 	}
1122 
1123 	/* get the mechanism parameters, if applicable */
1124 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1125 		if (mechanism->cm_param == NULL ||
1126 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1127 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1128 			goto bail;
1129 		}
1130 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1131 		if (digest_len > SHA1_DIGEST_LENGTH) {
1132 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1133 			goto bail;
1134 		}
1135 	}
1136 
1137 	/* do a SHA1 update of the inner context using the specified data */
1138 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1139 	if (ret != CRYPTO_SUCCESS)
1140 		/* the update failed, bail */
1141 		goto bail;
1142 
1143 	/*
1144 	 * Do a SHA1 final on the inner context.
1145 	 */
1146 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1147 
1148 	/*
1149 	 * Do a SHA1 update on the outer context, feeding the inner
1150 	 * digest as data.
1151 	 */
1152 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1153 
1154 	/*
1155 	 * Do a SHA1 final on the outer context, storing the computed
1156 	 * digest in the user's buffer.
1157 	 */
1158 	switch (mac->cd_format) {
1159 	case CRYPTO_DATA_RAW:
1160 		if (digest_len != SHA1_DIGEST_LENGTH) {
1161 			/*
1162 			 * The caller requested a short digest. Digest
1163 			 * into a scratch buffer and return to
1164 			 * the user only what was requested.
1165 			 */
1166 			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1167 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1168 			    mac->cd_offset, digest_len);
1169 		} else {
1170 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1171 			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1172 		}
1173 		break;
1174 	case CRYPTO_DATA_UIO:
1175 		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1176 		    digest_len, digest);
1177 		break;
1178 	case CRYPTO_DATA_MBLK:
1179 		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1180 		    digest_len, digest);
1181 		break;
1182 	default:
1183 		ret = CRYPTO_ARGUMENTS_BAD;
1184 	}
1185 
1186 	if (ret == CRYPTO_SUCCESS) {
1187 		mac->cd_length = digest_len;
1188 	} else {
1189 		mac->cd_length = 0;
1190 	}
1191 	/* Extra paranoia: zeroize the context on the stack */
1192 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1193 
1194 	return (ret);
1195 bail:
1196 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1197 	mac->cd_length = 0;
1198 	return (ret);
1199 }
1200 
1201 /* ARGSUSED */
1202 static int
1203 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1204     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1205     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1206     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1207 {
1208 	int ret = CRYPTO_SUCCESS;
1209 	uchar_t digest[SHA1_DIGEST_LENGTH];
1210 	sha1_hmac_ctx_t sha1_hmac_ctx;
1211 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1212 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1213 
1214 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1215 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1216 		return (CRYPTO_MECHANISM_INVALID);
1217 
1218 	/* Add support for key by attributes (RFE 4706552) */
1219 	if (key->ck_format != CRYPTO_KEY_RAW)
1220 		return (CRYPTO_ARGUMENTS_BAD);
1221 
1222 	if (ctx_template != NULL) {
1223 		/* reuse context template */
1224 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1225 	} else {
1226 		/* no context template, initialize context */
1227 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1228 			/*
1229 			 * Hash the passed-in key to get a smaller key.
1230 			 * The inner context is used since it hasn't been
1231 			 * initialized yet.
1232 			 */
1233 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1234 			    key->ck_data, keylen_in_bytes, digest);
1235 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1236 			    SHA1_DIGEST_LENGTH);
1237 		} else {
1238 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1239 			    keylen_in_bytes);
1240 		}
1241 	}
1242 
1243 	/* get the mechanism parameters, if applicable */
1244 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1245 		if (mechanism->cm_param == NULL ||
1246 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1247 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1248 			goto bail;
1249 		}
1250 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1251 		if (digest_len > SHA1_DIGEST_LENGTH) {
1252 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1253 			goto bail;
1254 		}
1255 	}
1256 
1257 	if (mac->cd_length != digest_len) {
1258 		ret = CRYPTO_INVALID_MAC;
1259 		goto bail;
1260 	}
1261 
1262 	/* do a SHA1 update of the inner context using the specified data */
1263 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1264 	if (ret != CRYPTO_SUCCESS)
1265 		/* the update failed, bail */
1266 		goto bail;
1267 
1268 	/* do a SHA1 final on the inner context */
1269 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1270 
1271 	/*
1272 	 * Do a SHA1 update on the outer context, feeding the inner
1273 	 * digest as data.
1274 	 */
1275 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1276 
1277 	/*
1278 	 * Do a SHA1 final on the outer context, storing the computed
1279 	 * digest in the user's buffer.
1280 	 */
1281 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1282 
1283 	/*
1284 	 * Compare the computed digest against the expected digest passed
1285 	 * as argument.
1286 	 */
1287 
1288 	switch (mac->cd_format) {
1289 
1290 	case CRYPTO_DATA_RAW:
1291 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1292 		    mac->cd_offset, digest_len) != 0)
1293 			ret = CRYPTO_INVALID_MAC;
1294 		break;
1295 
1296 	case CRYPTO_DATA_UIO: {
1297 		off_t offset = mac->cd_offset;
1298 		uint_t vec_idx;
1299 		off_t scratch_offset = 0;
1300 		size_t length = digest_len;
1301 		size_t cur_len;
1302 
1303 		/* we only support kernel buffers */
1304 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
1305 			return (CRYPTO_ARGUMENTS_BAD);
1306 
1307 		/* jump to the first iovec containing the expected digest */
1308 		for (vec_idx = 0;
1309 		    vec_idx < mac->cd_uio->uio_iovcnt &&
1310 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
1311 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
1312 			;
1313 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
1314 			/*
1315 			 * The caller specified an offset that is
1316 			 * larger than the total size of the buffers
1317 			 * it provided.
1318 			 */
1319 			ret = CRYPTO_DATA_LEN_RANGE;
1320 			break;
1321 		}
1322 
1323 		/* do the comparison of computed digest vs specified one */
1324 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1325 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1326 			    offset, length);
1327 
1328 			if (bcmp(digest + scratch_offset,
1329 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1330 			    cur_len) != 0) {
1331 				ret = CRYPTO_INVALID_MAC;
1332 				break;
1333 			}
1334 
1335 			length -= cur_len;
1336 			vec_idx++;
1337 			scratch_offset += cur_len;
1338 			offset = 0;
1339 		}
1340 		break;
1341 	}
1342 
1343 	case CRYPTO_DATA_MBLK: {
1344 		off_t offset = mac->cd_offset;
1345 		mblk_t *mp;
1346 		off_t scratch_offset = 0;
1347 		size_t length = digest_len;
1348 		size_t cur_len;
1349 
1350 		/* jump to the first mblk_t containing the expected digest */
1351 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1352 		    offset -= MBLKL(mp), mp = mp->b_cont)
1353 			;
1354 		if (mp == NULL) {
1355 			/*
1356 			 * The caller specified an offset that is larger than
1357 			 * the total size of the buffers it provided.
1358 			 */
1359 			ret = CRYPTO_DATA_LEN_RANGE;
1360 			break;
1361 		}
1362 
1363 		while (mp != NULL && length > 0) {
1364 			cur_len = MIN(MBLKL(mp) - offset, length);
1365 			if (bcmp(digest + scratch_offset,
1366 			    mp->b_rptr + offset, cur_len) != 0) {
1367 				ret = CRYPTO_INVALID_MAC;
1368 				break;
1369 			}
1370 
1371 			length -= cur_len;
1372 			mp = mp->b_cont;
1373 			scratch_offset += cur_len;
1374 			offset = 0;
1375 		}
1376 		break;
1377 	}
1378 
1379 	default:
1380 		ret = CRYPTO_ARGUMENTS_BAD;
1381 	}
1382 
1383 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1384 	return (ret);
1385 bail:
1386 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1387 	mac->cd_length = 0;
1388 	return (ret);
1389 }
1390 
1391 /*
1392  * KCF software provider context management entry points.
1393  */
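
/*
 * Editorial illustration (not part of the original file): a context
 * template lets a consumer pay the key-setup cost once and reuse it for
 * many MAC operations.  A hedged sketch, assuming the usual
 * crypto_create_ctx_template(9F) and crypto_mac(9F) consumer interfaces,
 * with "mech", "key", "data" and "mac" set up by the caller:
 *
 *	crypto_ctx_template_t tmpl;
 *
 *	if (crypto_create_ctx_template(&mech, &key, &tmpl, KM_SLEEP) ==
 *	    CRYPTO_SUCCESS) {
 *		(void) crypto_mac(&mech, &data, &key, tmpl, &mac, NULL);
 *		crypto_destroy_ctx_template(tmpl);
 *	}
 */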
1394 
1395 /* ARGSUSED */
1396 static int
1397 sha1_create_ctx_template(crypto_provider_handle_t provider,
1398     crypto_mechanism_t *mechanism, crypto_key_t *key,
1399     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1400     crypto_req_handle_t req)
1401 {
1402 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1403 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1404 
1405 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1406 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1407 		return (CRYPTO_MECHANISM_INVALID);
1408 	}
1409 
1410 	/* Add support for key by attributes (RFE 4706552) */
1411 	if (key->ck_format != CRYPTO_KEY_RAW)
1412 		return (CRYPTO_ARGUMENTS_BAD);
1413 
1414 	/*
1415 	 * Allocate and initialize SHA1 context.
1416 	 */
1417 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1418 	    crypto_kmflag(req));
1419 	if (sha1_hmac_ctx_tmpl == NULL)
1420 		return (CRYPTO_HOST_MEMORY);
1421 
1422 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1423 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
1424 
1425 		/*
1426 		 * Hash the passed-in key to get a smaller key.
1427 		 * The inner context is used since it hasn't been
1428 		 * initialized yet.
1429 		 */
1430 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1431 		    key->ck_data, keylen_in_bytes, digested_key);
1432 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1433 		    SHA1_DIGEST_LENGTH);
1434 	} else {
1435 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1436 		    keylen_in_bytes);
1437 	}
1438 
1439 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1440 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1441 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
1442 
1444 	return (CRYPTO_SUCCESS);
1445 }
1446 
1447 static int
1448 sha1_free_context(crypto_ctx_t *ctx)
1449 {
1450 	uint_t ctx_len;
1451 	sha1_mech_type_t mech_type;
1452 
1453 	if (ctx->cc_provider_private == NULL)
1454 		return (CRYPTO_SUCCESS);
1455 
1456 	/*
1457 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
1458 	 * have different lengths.
1459 	 */
1460 
1461 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1462 	if (mech_type == SHA1_MECH_INFO_TYPE)
1463 		ctx_len = sizeof (sha1_ctx_t);
1464 	else {
1465 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1466 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1467 		ctx_len = sizeof (sha1_hmac_ctx_t);
1468 	}
1469 
1470 	bzero(ctx->cc_provider_private, ctx_len);
1471 	kmem_free(ctx->cc_provider_private, ctx_len);
1472 	ctx->cc_provider_private = NULL;
1473 
1474 	return (CRYPTO_SUCCESS);
1475 }
1476