1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * This file contains routines which call into a provider's 28 * entry points and do other related work. 29 */ 30 31 #include <sys/types.h> 32 #include <sys/systm.h> 33 #include <sys/taskq_impl.h> 34 #include <sys/cmn_err.h> 35 36 #include <sys/crypto/common.h> 37 #include <sys/crypto/impl.h> 38 #include <sys/crypto/sched_impl.h> 39 40 /* 41 * Return B_TRUE if the specified entry point is NULL. We rely on the 42 * caller to provide, with offset_1 and offset_2, information to calculate 43 * the location of the entry point. The ops argument is a temporary local 44 * variable defined as caddr_t *. 
45 */ 46 #define KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops) \ 47 (ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)), \ 48 (*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL)) 49 50 51 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *, 52 kcf_req_params_t *); 53 54 void 55 kcf_free_triedlist(kcf_prov_tried_t *list) 56 { 57 kcf_prov_tried_t *l; 58 59 while ((l = list) != NULL) { 60 list = list->pt_next; 61 KCF_PROV_REFRELE(l->pt_pd); 62 kmem_free(l, sizeof (kcf_prov_tried_t)); 63 } 64 } 65 66 /* 67 * The typical caller of this routine does a kcf_get_mech_provider() 68 * which holds the provider and then calls this routine. So, for the 69 * common case (no KCF_HOLD_PROV flag) we skip doing a KCF_PROV_REFHOLD. 70 */ 71 kcf_prov_tried_t * 72 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd, 73 int flags) 74 { 75 kcf_prov_tried_t *l; 76 77 l = kmem_alloc(sizeof (kcf_prov_tried_t), 78 flags & (KM_SLEEP | KM_NOSLEEP)); 79 if (l == NULL) 80 return (NULL); 81 82 if (flags & KCF_HOLD_PROV) 83 KCF_PROV_REFHOLD(pd); 84 l->pt_pd = pd; 85 l->pt_next = *list; 86 *list = l; 87 88 return (l); 89 } 90 91 static boolean_t 92 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl) 93 { 94 while (triedl != NULL) { 95 if (triedl->pt_pd == pd) 96 return (B_TRUE); 97 triedl = triedl->pt_next; 98 }; 99 100 return (B_FALSE); 101 } 102 103 /* 104 * Search a mech entry's hardware provider list for the specified 105 * provider. Return true if found. 
106 */ 107 static boolean_t 108 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me, 109 crypto_func_group_t fg) 110 { 111 kcf_prov_mech_desc_t *prov_chain; 112 113 prov_chain = me->me_hw_prov_chain; 114 if (prov_chain != NULL) { 115 ASSERT(me->me_num_hwprov > 0); 116 for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) { 117 if (prov_chain->pm_prov_desc == pd && 118 IS_FG_SUPPORTED(prov_chain, fg)) { 119 return (B_TRUE); 120 } 121 } 122 } 123 return (B_FALSE); 124 } 125 126 /* 127 * This routine, given a logical provider, returns the least loaded 128 * provider belonging to the logical provider. The provider must be 129 * able to do the specified mechanism, i.e. check that the mechanism 130 * hasn't been disabled. In addition, just in case providers are not 131 * entirely equivalent, the provider's entry point is checked for 132 * non-nullness. This is accomplished by having the caller pass, as 133 * arguments, the offset of the function group (offset_1), and the 134 * offset of the function within the function group (offset_2). 135 * Returns NULL if no provider can be found. 
136 */ 137 int 138 kcf_get_hardware_provider(crypto_mech_type_t mech_type_1, 139 crypto_mech_type_t mech_type_2, boolean_t call_restrict, 140 kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg) 141 { 142 kcf_provider_desc_t *provider, *real_pd = old; 143 kcf_provider_desc_t *gpd = NULL; /* good provider */ 144 kcf_provider_desc_t *bpd = NULL; /* busy provider */ 145 kcf_provider_list_t *p; 146 kcf_ops_class_t class; 147 kcf_mech_entry_t *me; 148 kcf_mech_entry_tab_t *me_tab; 149 int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS; 150 kcf_lock_withpad_t *mp; 151 152 /* get the mech entry for the specified mechanism */ 153 class = KCF_MECH2CLASS(mech_type_1); 154 if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) { 155 return (CRYPTO_MECHANISM_INVALID); 156 } 157 158 me_tab = &kcf_mech_tabs_tab[class]; 159 index = KCF_MECH2INDEX(mech_type_1); 160 if ((index < 0) || (index >= me_tab->met_size)) { 161 return (CRYPTO_MECHANISM_INVALID); 162 } 163 164 me = &((me_tab->met_tab)[index]); 165 mp = &me_mutexes[CPU_SEQID]; 166 mutex_enter(&mp->kl_lock); 167 168 /* 169 * We assume the provider descriptor will not go away because 170 * it is being held somewhere, i.e. its reference count has been 171 * incremented. In the case of the crypto module, the provider 172 * descriptor is held by the session structure. 173 */ 174 if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { 175 if (old->pd_provider_list == NULL) { 176 real_pd = NULL; 177 rv = CRYPTO_DEVICE_ERROR; 178 goto out; 179 } 180 /* 181 * Find the least loaded real provider. KCF_PROV_LOAD gives 182 * the load (number of pending requests) of the provider. 
183 */ 184 mutex_enter(&old->pd_lock); 185 p = old->pd_provider_list; 186 while (p != NULL) { 187 provider = p->pl_provider; 188 189 ASSERT(provider->pd_prov_type != 190 CRYPTO_LOGICAL_PROVIDER); 191 192 if (call_restrict && 193 (provider->pd_flags & KCF_PROV_RESTRICTED)) { 194 p = p->pl_next; 195 continue; 196 } 197 198 if (!is_valid_provider_for_mech(provider, me, fg)) { 199 p = p->pl_next; 200 continue; 201 } 202 203 /* provider does second mech */ 204 if (mech_type_2 != CRYPTO_MECH_INVALID) { 205 int i; 206 207 i = KCF_TO_PROV_MECH_INDX(provider, 208 mech_type_2); 209 if (i == KCF_INVALID_INDX) { 210 p = p->pl_next; 211 continue; 212 } 213 } 214 215 if (provider->pd_state != KCF_PROV_READY) { 216 /* choose BUSY if no READY providers */ 217 if (provider->pd_state == KCF_PROV_BUSY) 218 bpd = provider; 219 p = p->pl_next; 220 continue; 221 } 222 223 /* Do load calculation only if needed */ 224 if ((p = p->pl_next) == NULL && gpd == NULL) { 225 gpd = provider; 226 } else { 227 len = KCF_PROV_LOAD(provider); 228 if (len < gqlen) { 229 gqlen = len; 230 gpd = provider; 231 } 232 } 233 } 234 235 if (gpd != NULL) { 236 real_pd = gpd; 237 KCF_PROV_REFHOLD(real_pd); 238 } else if (bpd != NULL) { 239 real_pd = bpd; 240 KCF_PROV_REFHOLD(real_pd); 241 } else { 242 /* can't find provider */ 243 real_pd = NULL; 244 rv = CRYPTO_MECHANISM_INVALID; 245 } 246 mutex_exit(&old->pd_lock); 247 248 } else { 249 if (!KCF_IS_PROV_USABLE(old) || 250 (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) { 251 real_pd = NULL; 252 rv = CRYPTO_DEVICE_ERROR; 253 goto out; 254 } 255 256 if (!is_valid_provider_for_mech(old, me, fg)) { 257 real_pd = NULL; 258 rv = CRYPTO_MECHANISM_INVALID; 259 goto out; 260 } 261 262 KCF_PROV_REFHOLD(real_pd); 263 } 264 out: 265 mutex_exit(&mp->kl_lock); 266 *new = real_pd; 267 return (rv); 268 } 269 270 /* 271 * This routine, given a logical provider, returns the least loaded 272 * provider belonging to the logical provider. 
Just in case providers 273 * are not entirely equivalent, the provider's entry point is checked 274 * for non-nullness. This is accomplished by having the caller pass, as 275 * arguments, the offset of the function group (offset_1), and the 276 * offset of the function within the function group (offset_2). 277 * Returns NULL if no provider can be found. 278 */ 279 int 280 kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2, 281 boolean_t call_restrict, kcf_provider_desc_t *old, 282 kcf_provider_desc_t **new) 283 { 284 kcf_provider_desc_t *provider, *real_pd = old; 285 kcf_provider_desc_t *gpd = NULL; /* good provider */ 286 kcf_provider_desc_t *bpd = NULL; /* busy provider */ 287 kcf_provider_list_t *p; 288 caddr_t *ops; 289 int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS; 290 291 /* 292 * We assume the provider descriptor will not go away because 293 * it is being held somewhere, i.e. its reference count has been 294 * incremented. In the case of the crypto module, the provider 295 * descriptor is held by the session structure. 296 */ 297 if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { 298 if (old->pd_provider_list == NULL) { 299 real_pd = NULL; 300 rv = CRYPTO_DEVICE_ERROR; 301 goto out; 302 } 303 /* 304 * Find the least loaded real provider. KCF_PROV_LOAD gives 305 * the load (number of pending requests) of the provider. 
306 */ 307 mutex_enter(&old->pd_lock); 308 p = old->pd_provider_list; 309 while (p != NULL) { 310 provider = p->pl_provider; 311 312 ASSERT(provider->pd_prov_type != 313 CRYPTO_LOGICAL_PROVIDER); 314 315 if (call_restrict && 316 (provider->pd_flags & KCF_PROV_RESTRICTED)) { 317 p = p->pl_next; 318 continue; 319 } 320 if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1, 321 offset_2, ops)) { 322 p = p->pl_next; 323 continue; 324 } 325 326 if (provider->pd_state != KCF_PROV_READY) { 327 /* choose BUSY if no READY providers */ 328 if (provider->pd_state == KCF_PROV_BUSY) 329 bpd = provider; 330 p = p->pl_next; 331 continue; 332 } 333 334 /* Do load calculation only if needed */ 335 if ((p = p->pl_next) == NULL && gpd == NULL) { 336 gpd = provider; 337 } else { 338 len = KCF_PROV_LOAD(provider); 339 if (len < gqlen) { 340 gqlen = len; 341 gpd = provider; 342 } 343 } 344 } 345 mutex_exit(&old->pd_lock); 346 347 if (gpd != NULL) { 348 real_pd = gpd; 349 KCF_PROV_REFHOLD(real_pd); 350 } else if (bpd != NULL) { 351 real_pd = bpd; 352 KCF_PROV_REFHOLD(real_pd); 353 } else { 354 /* can't find provider */ 355 real_pd = NULL; 356 rv = CRYPTO_DEVICE_ERROR; 357 } 358 359 } else { 360 if (!KCF_IS_PROV_USABLE(old) || 361 (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) { 362 real_pd = NULL; 363 rv = CRYPTO_DEVICE_ERROR; 364 goto out; 365 } 366 367 if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) { 368 real_pd = NULL; 369 rv = CRYPTO_NOT_SUPPORTED; 370 goto out; 371 } 372 KCF_PROV_REFHOLD(real_pd); 373 } 374 out: 375 *new = real_pd; 376 return (rv); 377 } 378 379 /* 380 * Return the next member of a logical provider, given the previous 381 * member. The function returns true if the next member is found and 382 * bumps its refcnt before returning. 
383 */ 384 boolean_t 385 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider, 386 kcf_provider_desc_t *prev, kcf_provider_desc_t **pd) 387 { 388 kcf_provider_list_t *p; 389 kcf_provider_desc_t *next; 390 391 ASSERT(MUTEX_HELD(&logical_provider->pd_lock)); 392 p = logical_provider->pd_provider_list; 393 while (p != NULL) { 394 /* start the search */ 395 if (prev == NULL) { 396 next = p->pl_provider; 397 goto found; 398 } else { 399 /* find where we were before */ 400 if (p->pl_provider == prev) { 401 if (p->pl_next != NULL) { 402 next = p->pl_next->pl_provider; 403 goto found; 404 } 405 } 406 } 407 p = p->pl_next; 408 } 409 return (B_FALSE); 410 411 found: 412 KCF_PROV_REFHOLD(next); 413 *pd = next; 414 return (B_TRUE); 415 } 416 417 /* 418 * Return the best provider for the specified mechanism. The provider 419 * is held and it is the caller's responsibility to release it when done. 420 * The fg input argument is used as a search criterion to pick a provider. 421 * A provider has to support this function group to be picked. 422 * 423 * Find the least loaded provider in the list of providers. We do a linear 424 * search to find one. This is fine as we assume there are only a few 425 * number of providers in this list. If this assumption ever changes, 426 * we should revisit this. 427 * 428 * call_restrict represents if the caller should not be allowed to 429 * use restricted providers. 
430 */ 431 kcf_provider_desc_t * 432 kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp, 433 int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg, 434 boolean_t call_restrict, size_t data_size) 435 { 436 kcf_provider_desc_t *pd = NULL, *gpd = NULL; 437 kcf_prov_mech_desc_t *prov_chain, *mdesc; 438 int len, gqlen = INT_MAX; 439 kcf_ops_class_t class; 440 int index; 441 kcf_mech_entry_t *me; 442 kcf_mech_entry_tab_t *me_tab; 443 kcf_lock_withpad_t *mp; 444 445 class = KCF_MECH2CLASS(mech_type); 446 if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) { 447 *error = CRYPTO_MECHANISM_INVALID; 448 return (NULL); 449 } 450 451 me_tab = &kcf_mech_tabs_tab[class]; 452 index = KCF_MECH2INDEX(mech_type); 453 if ((index < 0) || (index >= me_tab->met_size)) { 454 *error = CRYPTO_MECHANISM_INVALID; 455 return (NULL); 456 } 457 458 me = &((me_tab->met_tab)[index]); 459 if (mepp != NULL) 460 *mepp = me; 461 462 mp = &me_mutexes[CPU_SEQID]; 463 mutex_enter(&mp->kl_lock); 464 465 prov_chain = me->me_hw_prov_chain; 466 467 /* 468 * We check for the threshold for using a hardware provider for 469 * this amount of data. If there is no software provider available 470 * for the mechanism, then the threshold is ignored. 471 */ 472 if ((prov_chain != NULL) && 473 ((data_size == 0) || (me->me_threshold == 0) || 474 (data_size >= me->me_threshold) || 475 ((mdesc = me->me_sw_prov) == NULL) || 476 (!IS_FG_SUPPORTED(mdesc, fg)) || 477 (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) { 478 ASSERT(me->me_num_hwprov > 0); 479 /* there is at least one provider */ 480 481 /* 482 * Find the least loaded real provider. KCF_PROV_LOAD gives 483 * the load (number of pending requests) of the provider. 
484 */ 485 while (prov_chain != NULL) { 486 pd = prov_chain->pm_prov_desc; 487 488 if (!IS_FG_SUPPORTED(prov_chain, fg) || 489 !KCF_IS_PROV_USABLE(pd) || 490 IS_PROVIDER_TRIED(pd, triedl) || 491 (call_restrict && 492 (pd->pd_flags & KCF_PROV_RESTRICTED))) { 493 prov_chain = prov_chain->pm_next; 494 continue; 495 } 496 497 /* Do load calculation only if needed */ 498 if ((prov_chain = prov_chain->pm_next) == NULL && 499 gpd == NULL) { 500 gpd = pd; 501 } else { 502 len = KCF_PROV_LOAD(pd); 503 if (len < gqlen) { 504 gqlen = len; 505 gpd = pd; 506 } 507 } 508 } 509 510 pd = gpd; 511 } 512 513 /* No HW provider for this mech, is there a SW provider? */ 514 if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) { 515 pd = mdesc->pm_prov_desc; 516 if (!IS_FG_SUPPORTED(mdesc, fg) || 517 !KCF_IS_PROV_USABLE(pd) || 518 IS_PROVIDER_TRIED(pd, triedl) || 519 (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED))) 520 pd = NULL; 521 } 522 523 if (pd == NULL) { 524 /* 525 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED, when 526 * we are in the "fallback to the next provider" case. Rather 527 * we preserve the error, so that the client gets the right 528 * error code. 529 */ 530 if (triedl == NULL) 531 *error = CRYPTO_MECH_NOT_SUPPORTED; 532 } else { 533 KCF_PROV_REFHOLD(pd); 534 } 535 536 mutex_exit(&mp->kl_lock); 537 return (pd); 538 } 539 540 /* 541 * Very similar to kcf_get_mech_provider(). Finds the best provider capable of 542 * a dual operation with both me1 and me2. 543 * When no dual-ops capable providers are available, return the best provider 544 * for me1 only, and sets *prov_mt2 to CRYPTO_INVALID_MECHID; 545 * We assume/expect that a slower HW capable of the dual is still 546 * faster than the 2 fastest providers capable of the individual ops 547 * separately. 
548 */ 549 kcf_provider_desc_t * 550 kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2, 551 kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1, 552 crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl, 553 crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict, 554 size_t data_size) 555 { 556 kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL; 557 kcf_prov_mech_desc_t *prov_chain, *mdesc; 558 int len, gqlen = INT_MAX, dgqlen = INT_MAX; 559 crypto_mech_info_list_t *mil; 560 crypto_mech_type_t m2id = mech2->cm_type; 561 kcf_mech_entry_t *me; 562 kcf_lock_withpad_t *mp; 563 564 /* when mech is a valid mechanism, me will be its mech_entry */ 565 if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) { 566 *error = CRYPTO_MECHANISM_INVALID; 567 return (NULL); 568 } 569 570 *prov_mt2 = CRYPTO_MECH_INVALID; 571 572 if (mepp != NULL) 573 *mepp = me; 574 575 mp = &me_mutexes[CPU_SEQID]; 576 mutex_enter(&mp->kl_lock); 577 578 prov_chain = me->me_hw_prov_chain; 579 /* 580 * We check the threshold for using a hardware provider for 581 * this amount of data. If there is no software provider available 582 * for the first mechanism, then the threshold is ignored. 583 */ 584 if ((prov_chain != NULL) && 585 ((data_size == 0) || (me->me_threshold == 0) || 586 (data_size >= me->me_threshold) || 587 ((mdesc = me->me_sw_prov) == NULL) || 588 (!IS_FG_SUPPORTED(mdesc, fg1)) || 589 (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) { 590 /* there is at least one provider */ 591 ASSERT(me->me_num_hwprov > 0); 592 593 /* 594 * Find the least loaded provider capable of the combo 595 * me1 + me2, and save a pointer to the least loaded 596 * provider capable of me1 only. 
597 */ 598 while (prov_chain != NULL) { 599 pd = prov_chain->pm_prov_desc; 600 601 if (!IS_FG_SUPPORTED(prov_chain, fg1) || 602 !KCF_IS_PROV_USABLE(pd) || 603 IS_PROVIDER_TRIED(pd, triedl) || 604 (call_restrict && 605 (pd->pd_flags & KCF_PROV_RESTRICTED))) { 606 prov_chain = prov_chain->pm_next; 607 continue; 608 } 609 610 #define PMD_MECH_NUM(pmdp) (pmdp)->pm_mech_info.cm_mech_number 611 612 /* Do load calculation only if needed */ 613 if (prov_chain->pm_next == NULL && pdm1 == NULL) { 614 *prov_mt1 = PMD_MECH_NUM(prov_chain); 615 pdm1 = pd; 616 } else { 617 len = KCF_PROV_LOAD(pd); 618 619 /* Save the best provider capable of m1 */ 620 if (len < gqlen) { 621 *prov_mt1 = PMD_MECH_NUM(prov_chain); 622 gqlen = len; 623 pdm1 = pd; 624 } 625 } 626 627 /* See if pd can do me2 too */ 628 for (mil = prov_chain->pm_mi_list; 629 mil != NULL; mil = mil->ml_next) { 630 if ((mil->ml_mech_info.cm_func_group_mask & 631 fg2) == 0) 632 continue; 633 634 #define MIL_MECH_NUM(mil) (mil)->ml_mech_info.cm_mech_number 635 636 if (mil->ml_kcf_mechid == m2id) { /* Bingo! */ 637 638 /* Do load calculation only if needed */ 639 if (prov_chain->pm_next == NULL && 640 pdm1m2 == NULL) { 641 pdm1m2 = pd; 642 *prov_mt2 = MIL_MECH_NUM(mil); 643 } else { 644 if (len < dgqlen) { 645 dgqlen = len; 646 pdm1m2 = pd; 647 *prov_mt2 = 648 MIL_MECH_NUM(mil); 649 } 650 } 651 break; 652 } 653 } 654 655 prov_chain = prov_chain->pm_next; 656 } 657 658 pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1; 659 } 660 661 /* no HW provider for this mech, is there a SW provider? 
*/ 662 if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) { 663 pd = mdesc->pm_prov_desc; 664 if (!IS_FG_SUPPORTED(mdesc, fg1) || 665 !KCF_IS_PROV_USABLE(pd) || 666 IS_PROVIDER_TRIED(pd, triedl) || 667 (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED))) 668 pd = NULL; 669 else { 670 /* See if pd can do me2 too */ 671 for (mil = me->me_sw_prov->pm_mi_list; 672 mil != NULL; mil = mil->ml_next) { 673 if ((mil->ml_mech_info.cm_func_group_mask & 674 fg2) == 0) 675 continue; 676 677 if (mil->ml_kcf_mechid == m2id) { 678 /* Bingo! */ 679 *prov_mt2 = 680 mil->ml_mech_info.cm_mech_number; 681 break; 682 } 683 } 684 *prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number; 685 } 686 } 687 688 if (pd == NULL) 689 *error = CRYPTO_MECH_NOT_SUPPORTED; 690 else 691 KCF_PROV_REFHOLD(pd); 692 693 mutex_exit(&mp->kl_lock); 694 return (pd); 695 } 696 697 /* 698 * Do the actual work of calling the provider routines. 699 * 700 * pd - Provider structure 701 * ctx - Context for this operation 702 * params - Parameters for this operation 703 * rhndl - Request handle to use for notification 704 * 705 * The return values are the same as that of the respective SPI. 706 */ 707 int 708 common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx, 709 kcf_req_params_t *params, crypto_req_handle_t rhndl) 710 { 711 int err = CRYPTO_ARGUMENTS_BAD; 712 kcf_op_type_t optype; 713 714 optype = params->rp_optype; 715 716 switch (params->rp_opgrp) { 717 case KCF_OG_DIGEST: { 718 kcf_digest_ops_params_t *dops = ¶ms->rp_u.digest_params; 719 720 switch (optype) { 721 case KCF_OP_INIT: 722 /* 723 * We should do this only here and not in KCF_WRAP_* 724 * macros. This is because we may want to try other 725 * providers, in case we recover from a failure. 
726 */ 727 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 728 pd, &dops->do_mech); 729 730 err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech, 731 rhndl); 732 break; 733 734 case KCF_OP_SINGLE: 735 err = KCF_PROV_DIGEST(pd, ctx, dops->do_data, 736 dops->do_digest, rhndl); 737 break; 738 739 case KCF_OP_UPDATE: 740 err = KCF_PROV_DIGEST_UPDATE(pd, ctx, 741 dops->do_data, rhndl); 742 break; 743 744 case KCF_OP_FINAL: 745 err = KCF_PROV_DIGEST_FINAL(pd, ctx, 746 dops->do_digest, rhndl); 747 break; 748 749 case KCF_OP_ATOMIC: 750 ASSERT(ctx == NULL); 751 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 752 pd, &dops->do_mech); 753 err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid, 754 &dops->do_mech, dops->do_data, dops->do_digest, 755 rhndl); 756 break; 757 758 case KCF_OP_DIGEST_KEY: 759 err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key, 760 rhndl); 761 break; 762 763 default: 764 break; 765 } 766 break; 767 } 768 769 case KCF_OG_MAC: { 770 kcf_mac_ops_params_t *mops = ¶ms->rp_u.mac_params; 771 772 switch (optype) { 773 case KCF_OP_INIT: 774 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 775 pd, &mops->mo_mech); 776 777 err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech, 778 mops->mo_key, mops->mo_templ, rhndl); 779 break; 780 781 case KCF_OP_SINGLE: 782 err = KCF_PROV_MAC(pd, ctx, mops->mo_data, 783 mops->mo_mac, rhndl); 784 break; 785 786 case KCF_OP_UPDATE: 787 err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data, 788 rhndl); 789 break; 790 791 case KCF_OP_FINAL: 792 err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl); 793 break; 794 795 case KCF_OP_ATOMIC: 796 ASSERT(ctx == NULL); 797 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 798 pd, &mops->mo_mech); 799 800 err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid, 801 &mops->mo_mech, mops->mo_key, mops->mo_data, 802 mops->mo_mac, mops->mo_templ, rhndl); 803 break; 804 805 case KCF_OP_MAC_VERIFY_ATOMIC: 806 ASSERT(ctx == NULL); 807 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 808 pd, 
&mops->mo_mech); 809 810 err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid, 811 &mops->mo_mech, mops->mo_key, mops->mo_data, 812 mops->mo_mac, mops->mo_templ, rhndl); 813 break; 814 815 default: 816 break; 817 } 818 break; 819 } 820 821 case KCF_OG_ENCRYPT: { 822 kcf_encrypt_ops_params_t *eops = ¶ms->rp_u.encrypt_params; 823 824 switch (optype) { 825 case KCF_OP_INIT: 826 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 827 pd, &eops->eo_mech); 828 829 err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech, 830 eops->eo_key, eops->eo_templ, rhndl); 831 break; 832 833 case KCF_OP_SINGLE: 834 err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext, 835 eops->eo_ciphertext, rhndl); 836 break; 837 838 case KCF_OP_UPDATE: 839 err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, 840 eops->eo_plaintext, eops->eo_ciphertext, rhndl); 841 break; 842 843 case KCF_OP_FINAL: 844 err = KCF_PROV_ENCRYPT_FINAL(pd, ctx, 845 eops->eo_ciphertext, rhndl); 846 break; 847 848 case KCF_OP_ATOMIC: 849 ASSERT(ctx == NULL); 850 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 851 pd, &eops->eo_mech); 852 853 err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid, 854 &eops->eo_mech, eops->eo_key, eops->eo_plaintext, 855 eops->eo_ciphertext, eops->eo_templ, rhndl); 856 break; 857 858 default: 859 break; 860 } 861 break; 862 } 863 864 case KCF_OG_DECRYPT: { 865 kcf_decrypt_ops_params_t *dcrops = ¶ms->rp_u.decrypt_params; 866 867 switch (optype) { 868 case KCF_OP_INIT: 869 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 870 pd, &dcrops->dop_mech); 871 872 err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech, 873 dcrops->dop_key, dcrops->dop_templ, rhndl); 874 break; 875 876 case KCF_OP_SINGLE: 877 err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext, 878 dcrops->dop_plaintext, rhndl); 879 break; 880 881 case KCF_OP_UPDATE: 882 err = KCF_PROV_DECRYPT_UPDATE(pd, ctx, 883 dcrops->dop_ciphertext, dcrops->dop_plaintext, 884 rhndl); 885 break; 886 887 case KCF_OP_FINAL: 888 err = KCF_PROV_DECRYPT_FINAL(pd, 
ctx, 889 dcrops->dop_plaintext, rhndl); 890 break; 891 892 case KCF_OP_ATOMIC: 893 ASSERT(ctx == NULL); 894 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 895 pd, &dcrops->dop_mech); 896 897 err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid, 898 &dcrops->dop_mech, dcrops->dop_key, 899 dcrops->dop_ciphertext, dcrops->dop_plaintext, 900 dcrops->dop_templ, rhndl); 901 break; 902 903 default: 904 break; 905 } 906 break; 907 } 908 909 case KCF_OG_SIGN: { 910 kcf_sign_ops_params_t *sops = ¶ms->rp_u.sign_params; 911 912 switch (optype) { 913 case KCF_OP_INIT: 914 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 915 pd, &sops->so_mech); 916 917 err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech, 918 sops->so_key, sops->so_templ, rhndl); 919 break; 920 921 case KCF_OP_SIGN_RECOVER_INIT: 922 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 923 pd, &sops->so_mech); 924 925 err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx, 926 &sops->so_mech, sops->so_key, sops->so_templ, 927 rhndl); 928 break; 929 930 case KCF_OP_SINGLE: 931 err = KCF_PROV_SIGN(pd, ctx, sops->so_data, 932 sops->so_signature, rhndl); 933 break; 934 935 case KCF_OP_SIGN_RECOVER: 936 err = KCF_PROV_SIGN_RECOVER(pd, ctx, 937 sops->so_data, sops->so_signature, rhndl); 938 break; 939 940 case KCF_OP_UPDATE: 941 err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data, 942 rhndl); 943 break; 944 945 case KCF_OP_FINAL: 946 err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature, 947 rhndl); 948 break; 949 950 case KCF_OP_ATOMIC: 951 ASSERT(ctx == NULL); 952 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 953 pd, &sops->so_mech); 954 955 err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid, 956 &sops->so_mech, sops->so_key, sops->so_data, 957 sops->so_templ, sops->so_signature, rhndl); 958 break; 959 960 case KCF_OP_SIGN_RECOVER_ATOMIC: 961 ASSERT(ctx == NULL); 962 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 963 pd, &sops->so_mech); 964 965 err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid, 966 
&sops->so_mech, sops->so_key, sops->so_data, 967 sops->so_templ, sops->so_signature, rhndl); 968 break; 969 970 default: 971 break; 972 } 973 break; 974 } 975 976 case KCF_OG_VERIFY: { 977 kcf_verify_ops_params_t *vops = ¶ms->rp_u.verify_params; 978 979 switch (optype) { 980 case KCF_OP_INIT: 981 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 982 pd, &vops->vo_mech); 983 984 err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech, 985 vops->vo_key, vops->vo_templ, rhndl); 986 break; 987 988 case KCF_OP_VERIFY_RECOVER_INIT: 989 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 990 pd, &vops->vo_mech); 991 992 err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx, 993 &vops->vo_mech, vops->vo_key, vops->vo_templ, 994 rhndl); 995 break; 996 997 case KCF_OP_SINGLE: 998 err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data, 999 vops->vo_signature, rhndl); 1000 break; 1001 1002 case KCF_OP_VERIFY_RECOVER: 1003 err = KCF_PROV_VERIFY_RECOVER(pd, ctx, 1004 vops->vo_signature, vops->vo_data, rhndl); 1005 break; 1006 1007 case KCF_OP_UPDATE: 1008 err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data, 1009 rhndl); 1010 break; 1011 1012 case KCF_OP_FINAL: 1013 err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature, 1014 rhndl); 1015 break; 1016 1017 case KCF_OP_ATOMIC: 1018 ASSERT(ctx == NULL); 1019 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 1020 pd, &vops->vo_mech); 1021 1022 err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid, 1023 &vops->vo_mech, vops->vo_key, vops->vo_data, 1024 vops->vo_templ, vops->vo_signature, rhndl); 1025 break; 1026 1027 case KCF_OP_VERIFY_RECOVER_ATOMIC: 1028 ASSERT(ctx == NULL); 1029 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 1030 pd, &vops->vo_mech); 1031 1032 err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid, 1033 &vops->vo_mech, vops->vo_key, vops->vo_signature, 1034 vops->vo_templ, vops->vo_data, rhndl); 1035 break; 1036 1037 default: 1038 break; 1039 } 1040 break; 1041 } 1042 1043 case KCF_OG_ENCRYPT_MAC: { 1044 
kcf_encrypt_mac_ops_params_t *eops = 1045 ¶ms->rp_u.encrypt_mac_params; 1046 kcf_context_t *kcf_secondctx; 1047 1048 switch (optype) { 1049 case KCF_OP_INIT: 1050 kcf_secondctx = ((kcf_context_t *) 1051 (ctx->cc_framework_private))->kc_secondctx; 1052 1053 if (kcf_secondctx != NULL) { 1054 err = kcf_emulate_dual(pd, ctx, params); 1055 break; 1056 } 1057 KCF_SET_PROVIDER_MECHNUM( 1058 eops->em_framework_encr_mechtype, 1059 pd, &eops->em_encr_mech); 1060 1061 KCF_SET_PROVIDER_MECHNUM( 1062 eops->em_framework_mac_mechtype, 1063 pd, &eops->em_mac_mech); 1064 1065 err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, 1066 &eops->em_encr_mech, eops->em_encr_key, 1067 &eops->em_mac_mech, eops->em_mac_key, 1068 eops->em_encr_templ, eops->em_mac_templ, 1069 rhndl); 1070 1071 break; 1072 1073 case KCF_OP_SINGLE: 1074 err = KCF_PROV_ENCRYPT_MAC(pd, ctx, 1075 eops->em_plaintext, eops->em_ciphertext, 1076 eops->em_mac, rhndl); 1077 break; 1078 1079 case KCF_OP_UPDATE: 1080 kcf_secondctx = ((kcf_context_t *) 1081 (ctx->cc_framework_private))->kc_secondctx; 1082 if (kcf_secondctx != NULL) { 1083 err = kcf_emulate_dual(pd, ctx, params); 1084 break; 1085 } 1086 err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, 1087 eops->em_plaintext, eops->em_ciphertext, rhndl); 1088 break; 1089 1090 case KCF_OP_FINAL: 1091 kcf_secondctx = ((kcf_context_t *) 1092 (ctx->cc_framework_private))->kc_secondctx; 1093 if (kcf_secondctx != NULL) { 1094 err = kcf_emulate_dual(pd, ctx, params); 1095 break; 1096 } 1097 err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, 1098 eops->em_ciphertext, eops->em_mac, rhndl); 1099 break; 1100 1101 case KCF_OP_ATOMIC: 1102 ASSERT(ctx == NULL); 1103 1104 KCF_SET_PROVIDER_MECHNUM( 1105 eops->em_framework_encr_mechtype, 1106 pd, &eops->em_encr_mech); 1107 1108 KCF_SET_PROVIDER_MECHNUM( 1109 eops->em_framework_mac_mechtype, 1110 pd, &eops->em_mac_mech); 1111 1112 err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid, 1113 &eops->em_encr_mech, eops->em_encr_key, 1114 &eops->em_mac_mech, eops->em_mac_key, 
1115 eops->em_plaintext, eops->em_ciphertext, 1116 eops->em_mac, 1117 eops->em_encr_templ, eops->em_mac_templ, 1118 rhndl); 1119 1120 break; 1121 1122 default: 1123 break; 1124 } 1125 break; 1126 } 1127 1128 case KCF_OG_MAC_DECRYPT: { 1129 kcf_mac_decrypt_ops_params_t *dops = 1130 ¶ms->rp_u.mac_decrypt_params; 1131 kcf_context_t *kcf_secondctx; 1132 1133 switch (optype) { 1134 case KCF_OP_INIT: 1135 kcf_secondctx = ((kcf_context_t *) 1136 (ctx->cc_framework_private))->kc_secondctx; 1137 1138 if (kcf_secondctx != NULL) { 1139 err = kcf_emulate_dual(pd, ctx, params); 1140 break; 1141 } 1142 KCF_SET_PROVIDER_MECHNUM( 1143 dops->md_framework_mac_mechtype, 1144 pd, &dops->md_mac_mech); 1145 1146 KCF_SET_PROVIDER_MECHNUM( 1147 dops->md_framework_decr_mechtype, 1148 pd, &dops->md_decr_mech); 1149 1150 err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, 1151 &dops->md_mac_mech, dops->md_mac_key, 1152 &dops->md_decr_mech, dops->md_decr_key, 1153 dops->md_mac_templ, dops->md_decr_templ, 1154 rhndl); 1155 1156 break; 1157 1158 case KCF_OP_SINGLE: 1159 err = KCF_PROV_MAC_DECRYPT(pd, ctx, 1160 dops->md_ciphertext, dops->md_mac, 1161 dops->md_plaintext, rhndl); 1162 break; 1163 1164 case KCF_OP_UPDATE: 1165 kcf_secondctx = ((kcf_context_t *) 1166 (ctx->cc_framework_private))->kc_secondctx; 1167 if (kcf_secondctx != NULL) { 1168 err = kcf_emulate_dual(pd, ctx, params); 1169 break; 1170 } 1171 err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, 1172 dops->md_ciphertext, dops->md_plaintext, rhndl); 1173 break; 1174 1175 case KCF_OP_FINAL: 1176 kcf_secondctx = ((kcf_context_t *) 1177 (ctx->cc_framework_private))->kc_secondctx; 1178 if (kcf_secondctx != NULL) { 1179 err = kcf_emulate_dual(pd, ctx, params); 1180 break; 1181 } 1182 err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, 1183 dops->md_mac, dops->md_plaintext, rhndl); 1184 break; 1185 1186 case KCF_OP_ATOMIC: 1187 ASSERT(ctx == NULL); 1188 1189 KCF_SET_PROVIDER_MECHNUM( 1190 dops->md_framework_mac_mechtype, 1191 pd, &dops->md_mac_mech); 1192 1193 
KCF_SET_PROVIDER_MECHNUM( 1194 dops->md_framework_decr_mechtype, 1195 pd, &dops->md_decr_mech); 1196 1197 err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid, 1198 &dops->md_mac_mech, dops->md_mac_key, 1199 &dops->md_decr_mech, dops->md_decr_key, 1200 dops->md_ciphertext, dops->md_mac, 1201 dops->md_plaintext, 1202 dops->md_mac_templ, dops->md_decr_templ, 1203 rhndl); 1204 1205 break; 1206 1207 case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC: 1208 ASSERT(ctx == NULL); 1209 1210 KCF_SET_PROVIDER_MECHNUM( 1211 dops->md_framework_mac_mechtype, 1212 pd, &dops->md_mac_mech); 1213 1214 KCF_SET_PROVIDER_MECHNUM( 1215 dops->md_framework_decr_mechtype, 1216 pd, &dops->md_decr_mech); 1217 1218 err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd, 1219 dops->md_sid, &dops->md_mac_mech, dops->md_mac_key, 1220 &dops->md_decr_mech, dops->md_decr_key, 1221 dops->md_ciphertext, dops->md_mac, 1222 dops->md_plaintext, 1223 dops->md_mac_templ, dops->md_decr_templ, 1224 rhndl); 1225 1226 break; 1227 1228 default: 1229 break; 1230 } 1231 break; 1232 } 1233 1234 case KCF_OG_KEY: { 1235 kcf_key_ops_params_t *kops = ¶ms->rp_u.key_params; 1236 1237 ASSERT(ctx == NULL); 1238 KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd, 1239 &kops->ko_mech); 1240 1241 switch (optype) { 1242 case KCF_OP_KEY_GENERATE: 1243 err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid, 1244 &kops->ko_mech, 1245 kops->ko_key_template, kops->ko_key_attribute_count, 1246 kops->ko_key_object_id_ptr, rhndl); 1247 break; 1248 1249 case KCF_OP_KEY_GENERATE_PAIR: 1250 err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid, 1251 &kops->ko_mech, 1252 kops->ko_key_template, kops->ko_key_attribute_count, 1253 kops->ko_private_key_template, 1254 kops->ko_private_key_attribute_count, 1255 kops->ko_key_object_id_ptr, 1256 kops->ko_private_key_object_id_ptr, rhndl); 1257 break; 1258 1259 case KCF_OP_KEY_WRAP: 1260 err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid, 1261 &kops->ko_mech, 1262 kops->ko_key, kops->ko_key_object_id_ptr, 1263 kops->ko_wrapped_key, 
kops->ko_wrapped_key_len_ptr, 1264 rhndl); 1265 break; 1266 1267 case KCF_OP_KEY_UNWRAP: 1268 err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid, 1269 &kops->ko_mech, 1270 kops->ko_key, kops->ko_wrapped_key, 1271 kops->ko_wrapped_key_len_ptr, 1272 kops->ko_key_template, kops->ko_key_attribute_count, 1273 kops->ko_key_object_id_ptr, rhndl); 1274 break; 1275 1276 case KCF_OP_KEY_DERIVE: 1277 err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid, 1278 &kops->ko_mech, 1279 kops->ko_key, kops->ko_key_template, 1280 kops->ko_key_attribute_count, 1281 kops->ko_key_object_id_ptr, rhndl); 1282 break; 1283 1284 default: 1285 break; 1286 } 1287 break; 1288 } 1289 1290 case KCF_OG_RANDOM: { 1291 kcf_random_number_ops_params_t *rops = 1292 ¶ms->rp_u.random_number_params; 1293 1294 ASSERT(ctx == NULL); 1295 1296 switch (optype) { 1297 case KCF_OP_RANDOM_SEED: 1298 err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid, 1299 rops->rn_buf, rops->rn_buflen, rops->rn_entropy_est, 1300 rops->rn_flags, rhndl); 1301 break; 1302 1303 case KCF_OP_RANDOM_GENERATE: 1304 err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid, 1305 rops->rn_buf, rops->rn_buflen, rhndl); 1306 break; 1307 1308 default: 1309 break; 1310 } 1311 break; 1312 } 1313 1314 case KCF_OG_SESSION: { 1315 kcf_session_ops_params_t *sops = ¶ms->rp_u.session_params; 1316 1317 ASSERT(ctx == NULL); 1318 switch (optype) { 1319 case KCF_OP_SESSION_OPEN: 1320 /* 1321 * so_pd may be a logical provider, in which case 1322 * we need to check whether it has been removed. 1323 */ 1324 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1325 err = CRYPTO_DEVICE_ERROR; 1326 break; 1327 } 1328 err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr, 1329 rhndl, sops->so_pd); 1330 break; 1331 1332 case KCF_OP_SESSION_CLOSE: 1333 /* 1334 * so_pd may be a logical provider, in which case 1335 * we need to check whether it has been removed. 
1336 */ 1337 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1338 err = CRYPTO_DEVICE_ERROR; 1339 break; 1340 } 1341 err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid, 1342 rhndl, sops->so_pd); 1343 break; 1344 1345 case KCF_OP_SESSION_LOGIN: 1346 err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid, 1347 sops->so_user_type, sops->so_pin, 1348 sops->so_pin_len, rhndl); 1349 break; 1350 1351 case KCF_OP_SESSION_LOGOUT: 1352 err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl); 1353 break; 1354 1355 default: 1356 break; 1357 } 1358 break; 1359 } 1360 1361 case KCF_OG_OBJECT: { 1362 kcf_object_ops_params_t *jops = ¶ms->rp_u.object_params; 1363 1364 ASSERT(ctx == NULL); 1365 switch (optype) { 1366 case KCF_OP_OBJECT_CREATE: 1367 err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid, 1368 jops->oo_template, jops->oo_attribute_count, 1369 jops->oo_object_id_ptr, rhndl); 1370 break; 1371 1372 case KCF_OP_OBJECT_COPY: 1373 err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid, 1374 jops->oo_object_id, 1375 jops->oo_template, jops->oo_attribute_count, 1376 jops->oo_object_id_ptr, rhndl); 1377 break; 1378 1379 case KCF_OP_OBJECT_DESTROY: 1380 err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid, 1381 jops->oo_object_id, rhndl); 1382 break; 1383 1384 case KCF_OP_OBJECT_GET_SIZE: 1385 err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid, 1386 jops->oo_object_id, jops->oo_object_size, rhndl); 1387 break; 1388 1389 case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE: 1390 err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd, 1391 jops->oo_sid, jops->oo_object_id, 1392 jops->oo_template, jops->oo_attribute_count, rhndl); 1393 break; 1394 1395 case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE: 1396 err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd, 1397 jops->oo_sid, jops->oo_object_id, 1398 jops->oo_template, jops->oo_attribute_count, rhndl); 1399 break; 1400 1401 case KCF_OP_OBJECT_FIND_INIT: 1402 err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid, 1403 jops->oo_template, jops->oo_attribute_count, 1404 jops->oo_find_init_pp_ptr, rhndl); 1405 break; 1406 1407 
case KCF_OP_OBJECT_FIND: 1408 err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp, 1409 jops->oo_object_id_ptr, jops->oo_max_object_count, 1410 jops->oo_object_count_ptr, rhndl); 1411 break; 1412 1413 case KCF_OP_OBJECT_FIND_FINAL: 1414 err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp, 1415 rhndl); 1416 break; 1417 1418 default: 1419 break; 1420 } 1421 break; 1422 } 1423 1424 case KCF_OG_PROVMGMT: { 1425 kcf_provmgmt_ops_params_t *pops = ¶ms->rp_u.provmgmt_params; 1426 1427 ASSERT(ctx == NULL); 1428 switch (optype) { 1429 case KCF_OP_MGMT_EXTINFO: 1430 /* 1431 * po_pd may be a logical provider, in which case 1432 * we need to check whether it has been removed. 1433 */ 1434 if (KCF_IS_PROV_REMOVED(pops->po_pd)) { 1435 err = CRYPTO_DEVICE_ERROR; 1436 break; 1437 } 1438 err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl, 1439 pops->po_pd); 1440 break; 1441 1442 case KCF_OP_MGMT_INITTOKEN: 1443 err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin, 1444 pops->po_pin_len, pops->po_label, rhndl); 1445 break; 1446 1447 case KCF_OP_MGMT_INITPIN: 1448 err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin, 1449 pops->po_pin_len, rhndl); 1450 break; 1451 1452 case KCF_OP_MGMT_SETPIN: 1453 err = KCF_PROV_SET_PIN(pd, pops->po_sid, 1454 pops->po_old_pin, pops->po_old_pin_len, 1455 pops->po_pin, pops->po_pin_len, rhndl); 1456 break; 1457 1458 default: 1459 break; 1460 } 1461 break; 1462 } 1463 1464 case KCF_OG_NOSTORE_KEY: { 1465 kcf_key_ops_params_t *kops = ¶ms->rp_u.key_params; 1466 1467 ASSERT(ctx == NULL); 1468 KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd, 1469 &kops->ko_mech); 1470 1471 switch (optype) { 1472 case KCF_OP_KEY_GENERATE: 1473 err = KCF_PROV_NOSTORE_KEY_GENERATE(pd, kops->ko_sid, 1474 &kops->ko_mech, kops->ko_key_template, 1475 kops->ko_key_attribute_count, 1476 kops->ko_out_template1, 1477 kops->ko_out_attribute_count1, rhndl); 1478 break; 1479 1480 case KCF_OP_KEY_GENERATE_PAIR: 1481 err = KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd, 1482 kops->ko_sid, 
&kops->ko_mech, 1483 kops->ko_key_template, kops->ko_key_attribute_count, 1484 kops->ko_private_key_template, 1485 kops->ko_private_key_attribute_count, 1486 kops->ko_out_template1, 1487 kops->ko_out_attribute_count1, 1488 kops->ko_out_template2, 1489 kops->ko_out_attribute_count2, 1490 rhndl); 1491 break; 1492 1493 case KCF_OP_KEY_DERIVE: 1494 err = KCF_PROV_NOSTORE_KEY_DERIVE(pd, kops->ko_sid, 1495 &kops->ko_mech, kops->ko_key, 1496 kops->ko_key_template, 1497 kops->ko_key_attribute_count, 1498 kops->ko_out_template1, 1499 kops->ko_out_attribute_count1, rhndl); 1500 break; 1501 1502 default: 1503 break; 1504 } 1505 break; 1506 } 1507 default: 1508 break; 1509 } /* end of switch(params->rp_opgrp) */ 1510 1511 KCF_PROV_INCRSTATS(pd, err); 1512 return (err); 1513 } 1514 1515 /* 1516 * Emulate the call for a multipart dual ops with 2 single steps. 1517 * This routine is always called in the context of a working thread 1518 * running kcf_svc_do_run(). 1519 * The single steps are submitted in a pure synchronous way (blocking). 1520 * When this routine returns, kcf_svc_do_run() will call kcf_aop_done() 1521 * so the originating consumer's callback gets invoked. kcf_aop_done() 1522 * takes care of freeing the operation context. So, this routine does 1523 * not free the operation context. 1524 * 1525 * The provider descriptor is assumed held by the callers. 
 *
 * Dispatches on params->rp_opgrp (KCF_OG_ENCRYPT_MAC or KCF_OG_MAC_DECRYPT)
 * and then on the operation type (INIT/UPDATE/FINAL).  Any other opgrp or
 * optype falls through and returns the initial CRYPTO_ARGUMENTS_BAD.
 */
static int
kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    kcf_req_params_t *params)
{
	int err = CRYPTO_ARGUMENTS_BAD;
	kcf_op_type_t optype;
	size_t save_len;
	off_t save_offset;

	optype = params->rp_optype;

	switch (params->rp_opgrp) {
	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *cmops =
		    &params->rp_u.encrypt_mac_params;
		kcf_context_t *encr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t encr_params;

		/* ctx is the encrypt context; the MAC context hangs off it. */
		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			/*
			 * Start with no second context so a failed init
			 * leaves the encrypt context in a consistent state.
			 */
			encr_kcf_ctx->kc_secondctx = NULL;

			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
			    pd->pd_sid, &cmops->em_encr_mech,
			    cmops->em_encr_key, NULL, NULL,
			    cmops->em_encr_templ);

			/* Synchronous submission (see block comment above). */
			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			err = crypto_mac_init(&cmops->em_mac_mech,
			    cmops->em_mac_key, cmops->em_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			if (err == CRYPTO_SUCCESS) {
				/*
				 * Link the MAC context as the second context
				 * and take a hold on it; the hold pairs with
				 * the release done when the dual operation
				 * completes (outside this routine).
				 */
				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
				    mac_ctx->cc_framework_private;
				KCF_CONTEXT_REFHOLD((kcf_context_t *)
				    mac_ctx->cc_framework_private);
			}

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *pt = cmops->em_plaintext;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			/*
			 * Temporarily point the primary (offset1/len1) view
			 * of the dual data at the freshly produced
			 * ciphertext (offset2/len2) so the MAC step consumes
			 * only this update's output; restored below.
			 */
			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;
			if (ct->dd_len2 == 0) {
				/*
				 * The previous encrypt step was an
				 * accumulation only and didn't produce any
				 * partial output
				 */
				if (ct->dd_len1 == 0)
					break;

			} else {
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *mac = cmops->em_mac;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
			crypto_context_t mac_context = mac_ctx;

			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				/* Encrypt final failed; abandon the MAC ctx. */
				crypto_cancel_ctx(mac_context);
				break;
			}

			/* MAC any ciphertext emitted by the final encrypt. */
			if (ct->dd_len2 > 0) {
				save_offset = ct->dd_offset1;
				save_len = ct->dd_len1;
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;

				err = crypto_mac_update(mac_context,
				    (crypto_data_t *)ct, NULL);

				ct->dd_offset1 = save_offset;
				ct->dd_len1 = save_len;

				if (err != CRYPTO_SUCCESS) {
					crypto_cancel_ctx(mac_context);
					/*
					 * NOTE(review): this early return
					 * skips the KCF_PROV_INCRSTATS call
					 * below, unlike every other error
					 * path in this case — confirm this
					 * is intentional.
					 */
					return (err);
				}
			}

			/* and finally, collect the MAC */
			err = crypto_mac_final(mac_context, mac, NULL);
			break;
		}

		default:
			break;
		}
		KCF_PROV_INCRSTATS(pd, err);
		break;
	}
	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *mdops =
		    &params->rp_u.mac_decrypt_params;
		kcf_context_t *decr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t decr_params;

		/* ctx is the decrypt context; the MAC context hangs off it. */
		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			decr_kcf_ctx->kc_secondctx = NULL;

			/*
			 * Unlike the encrypt-MAC case, the MAC context is
			 * created first here: for MAC/decrypt the MAC is
			 * verified over the ciphertext before decryption.
			 */
			err = crypto_mac_init(&mdops->md_mac_mech,
			    mdops->md_mac_key, mdops->md_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
			    pd->pd_sid, &mdops->md_decr_mech,
			    mdops->md_decr_key, NULL, NULL,
			    mdops->md_decr_templ);

			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				/* Decrypt init failed; drop the MAC ctx. */
				crypto_cancel_ctx((crypto_context_t)mac_ctx);
				break;
			}

			/* Link and hold the MAC context (see encrypt case). */
			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
			    mac_ctx->cc_framework_private;
			KCF_CONTEXT_REFHOLD((kcf_context_t *)
			    mac_ctx->cc_framework_private);

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = mdops->md_ciphertext;
			crypto_data_t *pt = mdops->md_plaintext;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* MAC the full primary view of the ciphertext. */
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			if (err != CRYPTO_SUCCESS)
				break;

			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;

			/* zero ct->dd_len2 means decrypt everything */
			if (ct->dd_len2 > 0) {
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}

			err = crypto_decrypt_update((crypto_context_t)ctx,
			    (crypto_data_t *)ct, pt, NULL);

			/* Restore the caller's view of the dual data. */
			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_data_t *pt = mdops->md_plaintext;
			crypto_data_t *mac = mdops->md_mac;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			err = crypto_mac_final((crypto_context_t)mac_ctx,
			    mac, NULL);

			if (err != CRYPTO_SUCCESS) {
				/* MAC failed; abandon the decrypt context. */
				crypto_cancel_ctx(ctx);
				break;
			}

			/* Get the last chunk of plaintext */
			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
			    NULL);

			break;
		}
		/*
		 * NOTE(review): no default label here, unlike the
		 * KCF_OG_ENCRYPT_MAC inner switch above, and no
		 * KCF_PROV_INCRSTATS after this switch — presumably the
		 * crypto_mac_*()/crypto_decrypt_*() calls account for the
		 * stats themselves; confirm the asymmetry is intentional.
		 */
		}
		break;
	}
	default:

		break;
	} /* end of switch(params->rp_opgrp) */

	return (err);
}