xref: /illumos-gate/usr/src/uts/common/inet/ip/spdsock.c (revision 861a91627796c35220e75654dac61e5707536dcd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsubr.h>
30 #include <sys/strsun.h>
31 #include <sys/stropts.h>
32 #include <sys/zone.h>
33 #include <sys/vnode.h>
34 #include <sys/sysmacros.h>
35 #define	_SUN_TPI_VERSION 2
36 #include <sys/tihdr.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/mkdev.h>
40 #include <sys/debug.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/suntpi.h>
44 #include <sys/policy.h>
45 #include <sys/dls.h>
46 
47 #include <sys/socket.h>
48 #include <netinet/in.h>
49 #include <net/pfkeyv2.h>
50 #include <net/pfpolicy.h>
51 
52 #include <inet/common.h>
53 #include <netinet/ip6.h>
54 #include <inet/ip.h>
55 #include <inet/ip6.h>
56 #include <inet/mi.h>
57 #include <inet/proto_set.h>
58 #include <inet/nd.h>
59 #include <inet/ip_if.h>
60 #include <inet/optcom.h>
61 #include <inet/ipsec_impl.h>
62 #include <inet/spdsock.h>
63 #include <inet/sadb.h>
64 #include <inet/iptun.h>
65 #include <inet/iptun/iptun_impl.h>
66 
67 #include <sys/isa_defs.h>
68 
69 #include <c2/audit.h>
70 
71 /*
72  * This is a transport provider for the PF_POLICY IPsec policy
73  * management socket, which provides a management interface into the
74  * SPD, allowing policy rules to be added, deleted, and queried.
75  *
76  * This effectively replaces the old private SIOC*IPSECONFIG ioctls
77  * with an extensible interface which will hopefully be public some
78  * day.
79  *
80  * See <net/pfpolicy.h> for more details on the protocol.
81  *
82  * We link against drv/ip and call directly into it to manipulate the
83  * SPD; see ipsec_impl.h for the policy data structures and spd.c for
84  * the code which maintains them.
85  *
86  * The MT model of this is QPAIR with the addition of some explicit
87  * locking to protect system-wide policy data structures.
88  */
89 
90 static vmem_t *spdsock_vmem;		/* for minor numbers. */
91 
92 #define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))
93 
94 /* Default structure copied into T_INFO_ACK messages (from rts.c...) */
95 static struct T_info_ack spdsock_g_t_info_ack = {
96 	T_INFO_ACK,
97 	T_INFINITE,	/* TSDU_size. Maximum size messages. */
98 	T_INVALID,	/* ETSDU_size. No expedited data. */
99 	T_INVALID,	/* CDATA_size. No connect data. */
100 	T_INVALID,	/* DDATA_size. No disconnect data. */
101 	0,		/* ADDR_size. */
102 	0,		/* OPT_size. No user-settable options */
103 	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
104 	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
105 	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
106 	(XPG4_1)	/* Provider flags */
107 };
108 
/*
 * Named Dispatch (NDD) parameter management structure: one entry per
 * tunable, holding the allowed [min, max] range, the current value and
 * the name under which the entry is registered via nd_load().
 */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;	/* smallest accepted value */
	uint_t	spdsock_param_max;	/* largest accepted value */
	uint_t	spdsock_param_value;	/* current value */
	char *spdsock_param_name;	/* NDD variable name */
} spdsockparam_t;
116 
117 /*
118  * Table of NDD variables supported by spdsock. These are loaded into
119  * spdsock_g_nd in spdsock_init_nd.
120  * All of these are alterable, within the min/max values given, at run time.
121  */
static	spdsockparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536,	8192,	"spdsock_xmit_hiwat"},
	{ 0,	65536,	1024,	"spdsock_xmit_lowat"},
	{ 4096, 65536,	8192,	"spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024,	"spdsock_max_buf"},
	{ 0,	3,	0,	"spdsock_debug"},
};
/*
 * Shorthand accessors for the per-stack copy of the table above
 * (spds_params); each index must match the ordering of lcl_param_arr.
 */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug		spds_params[4].spdsock_param_value
135 
/* Debug printf wrappers, gated by the spdsock_debug NDD tunable (0..3). */
#define	ss0dbg(a)	printf a
/* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

/*
 * Re-prime a socket's dump state so a policy-head dump restarts from the
 * beginning of (iph).  Caller must hold iph_lock at least as reader (the
 * ASSERT below enforces this) so iph_gen is a stable snapshot.
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}
152 
153 static int spdsock_close(queue_t *);
154 static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
155 static void spdsock_wput(queue_t *, mblk_t *);
156 static void spdsock_wsrv(queue_t *);
157 static void spdsock_rsrv(queue_t *);
158 static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
159 static void spdsock_stack_fini(netstackid_t stackid, void *arg);
160 static void spdsock_loadcheck(void *);
161 static void spdsock_merge_algs(spd_stack_t *);
162 static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
163 static mblk_t *spdsock_dump_next_record(spdsock_t *);
164 static void update_iptun_policy(ipsec_tun_pol_t *);
165 
/* STREAMS module identity: id 5138, name "spdsock". */
static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

/* Read side: service routine plus the open/close entry points. */
static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

/* Write side: put and service routines. */
static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

/* Exported stream table tying the two halves together. */
struct streamtab spdsockinfo = {
	&rinit, &winit
};
182 
/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

/* Sentinel polhead pointers meaning "every active/inactive head". */
#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

/* Tunnel name for audit records, or NULL when no tunnel is involved. */
#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
203 
204 /* ARGSUSED */
205 static int
206 spdsock_param_get(q, mp, cp, cr)
207 	queue_t	*q;
208 	mblk_t	*mp;
209 	caddr_t	cp;
210 	cred_t *cr;
211 {
212 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
213 	uint_t value;
214 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
215 	spd_stack_t	*spds = ss->spdsock_spds;
216 
217 	mutex_enter(&spds->spds_param_lock);
218 	value = spdsockpa->spdsock_param_value;
219 	mutex_exit(&spds->spds_param_lock);
220 
221 	(void) mi_mpprintf(mp, "%u", value);
222 	return (0);
223 }
224 
225 /* This routine sets an NDD variable in a spdsockparam_t structure. */
226 /* ARGSUSED */
227 static int
228 spdsock_param_set(q, mp, value, cp, cr)
229 	queue_t	*q;
230 	mblk_t	*mp;
231 	char *value;
232 	caddr_t	cp;
233 	cred_t *cr;
234 {
235 	ulong_t	new_value;
236 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
237 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
238 	spd_stack_t	*spds = ss->spdsock_spds;
239 
240 	/* Convert the value from a string into a long integer. */
241 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
242 		return (EINVAL);
243 
244 	mutex_enter(&spds->spds_param_lock);
245 	/*
246 	 * Fail the request if the new value does not lie within the
247 	 * required bounds.
248 	 */
249 	if (new_value < spdsockpa->spdsock_param_min ||
250 	    new_value > spdsockpa->spdsock_param_max) {
251 		mutex_exit(&spds->spds_param_lock);
252 		return (EINVAL);
253 	}
254 
255 	/* Set the new value */
256 	spdsockpa->spdsock_param_value = new_value;
257 	mutex_exit(&spds->spds_param_lock);
258 
259 	return (0);
260 }
261 
262 /*
263  * Initialize at module load time
264  */
boolean_t
spdsock_ddi_init(void)
{
	/* Pre-compute the largest option buffer spdsock will ever need. */
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	/*
	 * Identifier arena for minor numbers; base of 1 so minor 0 is
	 * never handed out.
	 */
	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}
284 
285 /*
286  * Walk through the param array specified registering each element with the
287  * named dispatch handler.
288  */
289 static boolean_t
290 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
291 {
292 	for (; cnt-- > 0; ssp++) {
293 		if (ssp->spdsock_param_name != NULL &&
294 		    ssp->spdsock_param_name[0]) {
295 			if (!nd_load(ndp,
296 			    ssp->spdsock_param_name,
297 			    spdsock_param_get, spdsock_param_set,
298 			    (caddr_t)ssp)) {
299 				nd_free(ndp);
300 				return (B_FALSE);
301 			}
302 		}
303 	}
304 	return (B_TRUE);
305 }
306 
307 /*
308  * Initialize for each stack instance
309  */
/* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	/* One spd_stack_t per netstack; zeroed so pointers start NULL. */
	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	/* Give each stack its own writable copy of the tunables table. */
	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}
334 
/* Module unload: release the minor-number arena and stack callbacks. */
void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}
342 
/*
 * Tear down one stack instance: free any cached algorithm message,
 * destroy the locks, unregister the NDD table and release the
 * per-stack parameter copy before freeing the spd_stack_t itself.
 */
/* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	/* freemsg() tolerates a NULL mblk, so no guard is needed. */
	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}
358 
359 /*
360  * NOTE: large quantities of this should be shared with keysock.
361  * Would be nice to combine some of this into a common module, but
362  * not possible given time pressures.
363  */
364 
365 /*
366  * High-level reality checking of extensions.
367  */
/*
 * High-level sanity check of a single extension.  Currently only the
 * tunnel-name extension is inspected: its payload string is forcibly
 * NUL-terminated in place.  Always returns B_TRUE for now.
 */
/* ARGSUSED */ /* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 *
		 * NOTE(review): if spd_if_len is exactly sizeof (spd_if_t)
		 * in 64-bit words, i starts at 0: the while condition still
		 * reads *idstr once, and the i == 0 branch then writes one
		 * byte *before* the payload.  Whether that byte is part of
		 * spd_if_t's own name storage depends on the pfpolicy.h
		 * layout — verify.  A negative i (undersized extension)
		 * skips termination entirely; presumably spdsock_get_ext's
		 * length check prevents that — confirm.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}
401 
402 
403 
404 /* Return values for spdsock_get_ext(). */
405 #define	KGE_OK	0
406 #define	KGE_DUP	1
407 #define	KGE_UNK	2
408 #define	KGE_LEN	3
409 #define	KGE_CHK	4
410 
411 /*
412  * Parse basic extension headers and return in the passed-in pointer vector.
413  * Return values include:
414  *
415  *	KGE_OK	Everything's nice and parsed out.
416  *		If there are no extensions, place NULL in extv[0].
417  *	KGE_DUP	There is a duplicate extension.
418  *		First instance in appropriate bin.  First duplicate in
419  *		extv[0].
420  *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
421  *		unknown header.
422  *	KGE_LEN	Extension length error.
423  *	KGE_CHK	High-level reality check failed on specific extension.
424  *
425  * My apologies for some of the pointer arithmetic in here.  I'm thinking
426  * like an assembly programmer, yet trying to make the compiler happy.
427  */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	/* Start with an empty bin vector (slots 1..SPD_EXT_MAX). */
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If the working pointer never advanced past the base header, the
	 * message carried no extensions; report that as NULL in extv[0].
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}
483 
/*
 * Diagnostic codes reported when an extension fails its length or
 * reality check; presumably indexed by extension type — verify against
 * the KGE_LEN/KGE_CHK handling at the call sites.
 */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

/* As above, but for KGE_DUP (duplicated extension) failures. */
static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};
507 
508 /*
509  * Transmit a PF_POLICY error message to the instance either pointed to
510  * by ks, the instance with serial number serial, or more, depending.
511  *
512  * The faulty message (or a reasonable facsimile thereof) is in mp.
513  * This function will free mp or recycle it for delivery, thereby causing
514  * the stream head to free it.
515  */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	/* Clamp out-of-range message types before echoing them back. */
	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers: shrink the reply to just the base
	 * spd_msg_t carrying the errno and diagnostic.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	/* qreply() consumes mp; the stream head will free it. */
	qreply(q, mp);
}
538 
/* Convenience wrapper: report an EINVAL with the given diagnostic. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}
544 
/* Echo the (possibly updated) request back to the caller; consumes mp. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}
550 
551 /*
552  * Do NOT consume a reference to itp.
553  */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	/* cookie encodes which polhead to flush: active vs. inactive. */
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	/* Extra hold so the head survives until spdsock_flush_one(). */
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);  /* Releases iph refhold. */
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
	/* SPD_FLUSH is worth a tunnel MTU check. */
	update_iptun_policy(itp);
}
573 
574 /*
575  * Clear out one polhead.
576  */
/*
 * Clear out one polhead.  Consumes the caller's reference on iph
 * (via IPPH_REFRELE) after flushing it under the writer lock.
 */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}
585 
/*
 * Handle an SPD_FLUSH request.  iph is either a specific polhead or one
 * of the ALL_{ACTIVE,INACTIVE}_POLHEADS sentinels, in which case the
 * global head and every tunnel's matching head are flushed.  Each flush
 * is audited when auditing is enabled; mp is echoed back on completion.
 */
static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		/* Single, explicit polhead. */
		spdsock_flush_one(iph, ns);
		if (auditing) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    ITP_NAME(itp), active, 0, cpid);
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (auditing) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
			    active, 0, cpid);
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (auditing) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    "all tunnels", active, 0, cpid);
		}
	}

	/* Acknowledge by echoing the request back; consumes mp. */
	spd_echo(q, mp);
}
635 
/*
 * Convert the parsed extension vector into an ipsec_selkey_t selector.
 * Each present extension (proto, ports, ICMP type/code, addresses) sets
 * the corresponding IPSL_* bit in ipsl_valid.  Returns B_FALSE with
 * *diag set on a malformed address or mixed IPv4/IPv6 selectors.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc=
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* Normalize an inverted range to a single-type range. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/* Code 255 means "no code specified". */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
/*
 * Copy one address extension into the selector: validates the extension
 * length against the address family's size, then fills in address,
 * prefix length and the matching IPSL_* validity bits.
 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit)			      \
	if ((extv)[(extn)] != NULL) {					      \
		uint_t addrlen;						      \
		struct spd_address *ap = 				      \
			(struct spd_address *)((extv)[(extn)]); 	      \
		addrlen = (ap->spd_address_af == AF_INET6) ? 		      \
			IPV6_ADDR_LEN : IP_ADDR_LEN;			      \
		if (SPD_64TO8(ap->spd_address_len) < 			      \
			(addrlen + sizeof (*ap))) {			      \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN;		      \
			return (B_FALSE);				      \
		}							      \
		bcopy((ap+1), &((sel)->field), addrlen);		      \
		(sel)->pfield = ap->spd_address_prefixlen;		      \
		(sel)->ipsl_valid |= (bit);				      \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ?	      \
			IPSL_IPV6 : IPSL_IPV4;				      \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	/* Local and remote addresses must agree on the address family. */
	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}
715 
716 static boolean_t
717 spd_convert_type(uint32_t type, ipsec_act_t *act)
718 {
719 	switch (type) {
720 	case SPD_ACTTYPE_DROP:
721 		act->ipa_type = IPSEC_ACT_DISCARD;
722 		return (B_TRUE);
723 
724 	case SPD_ACTTYPE_PASS:
725 		act->ipa_type = IPSEC_ACT_CLEAR;
726 		return (B_TRUE);
727 
728 	case SPD_ACTTYPE_IPSEC:
729 		act->ipa_type = IPSEC_ACT_APPLY;
730 		return (B_TRUE);
731 	}
732 	return (B_FALSE);
733 }
734 
735 static boolean_t
736 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
737 {
738 	/*
739 	 * Note use of !! for boolean canonicalization.
740 	 */
741 	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
742 	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
743 	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
744 	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
745 	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
746 	return (B_TRUE);
747 }
748 
749 static void
750 spdsock_reset_act(ipsec_act_t *act)
751 {
752 	bzero(act, sizeof (*act));
753 	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
754 	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
755 	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
756 }
757 
758 /*
759  * Sanity check action against reality, and shrink-wrap key sizes..
760  */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	/* Unique SAs make no sense on a tunnel polhead. */
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* ipsec-apply flags are only meaningful for IPSEC_ACT_APPLY. */
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* Conversely, an apply action must use at least AH or ESP. */
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* Defer algorithm/key-size validation to the common IPsec code. */
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}
786 
787 /*
788  * We may be short a few error checks here..
789  */
/*
 * Convert the SPD_EXT_ACTION extension into a kmem-allocated vector of
 * ipsec_act_t, returned through *actpp / *nactp (caller frees with
 * ipsec_actvec_free).  Attributes accumulate into a working action that
 * is committed on SPD_ATTR_NEXT/SPD_ATTR_END; each committed action is
 * sanity-checked by spdsock_check_action().  Returns B_FALSE with
 * *diag set on any failure.
 *
 * NOTE(review): spd_actions_count comes from userland and is used
 * unchecked in the kmem_alloc() size multiplication, and the RULE
 * extension is dereferenced for the tunnel test; presumably the caller
 * (spdsock_addrule) has already validated both — verify.
 */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	/* spd_actions_len is in 64-bit words; compute the attribute end. */
	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			/* Terminate the loop after committing this action. */
			attrp = endattrp;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			/* Lifetime attributes are accepted but ignored. */
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	/* The caller-declared count must match the actions actually seen. */
	if (actp != endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}
959 
/*
 * Scratch record of a policy freshly created during rule addition, so
 * the caller can unwind (free) all of them if any step fails.
 */
typedef struct
{
	ipsec_policy_t *pol;	/* newly created policy entry */
	int dir;		/* direction it was checked against */
} tmprule_t;
965 
/*
 * Create one policy entry for a single address family (af) and record
 * it in the caller's tmprule array via *rp.  Returns 0 on success,
 * ENOMEM if the policy could not be created, or EEXIST if it clashes
 * with an existing rule.  On success the kernel-assigned index is
 * written back into the user's rule for the reply.
 */
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	/* Pin the selector to exactly the requested address family. */
	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	/*
	 * Record the new policy *before* the duplicate check so the
	 * caller's unwind path frees it even on EEXIST.
	 */
	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}
991 
992 static int
993 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
994     ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
995     tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
996 {
997 	int error;
998 
999 	if (afs & IPSL_IPV4) {
1000 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
1001 		    index, spds);
1002 		if (error != 0)
1003 			return (error);
1004 	}
1005 	if (afs & IPSL_IPV6) {
1006 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
1007 		    index, spds);
1008 		if (error != 0)
1009 			return (error);
1010 	}
1011 	return (0);
1012 }
1013 
1014 
/*
 * Service an SPD_ADDRULE request: build and insert a policy rule into
 * policy head "iph".  One request can expand into up to four kernel
 * rules (inbound/outbound x IPv4/IPv6).  The rules are first created
 * unattached in rules[]; only after all of them are built successfully
 * are they entered into the head (under the head's write lock), so a
 * failure leaves iph untouched.  Replies with spd_echo() on success or
 * spdsock_error() on failure, and emits an audit record either way
 * when auditing is enabled.  "itp" is the tunnel policy node when this
 * head belongs to a tunnel, or NULL for global policy; its flags are
 * updated to reflect tunnel/transport mode and per-port security.
 */
static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	/* Worst case: {inbound,outbound} x {v4,v6} = 4 unlinked rules. */
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	uint32_t auditing = AU_AUDITING();

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (auditing) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, cr,
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			/*
			 * First rule for this head: claim it for this mode.
			 * empty_itp lets the failure path undo the claim.
			 */
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	/* Callers may not pick rule indices; the kernel assigns them. */
	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			/* Port selectors imply per-port security. */
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	/* All rules built; commit them to the head under the write lock. */
	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (auditing) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, 0, cpid);
	}
	return;

fail:
	/* Release any rules created before the failure. */
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0])
		IPPOL_REFRELE(rulep->pol);
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		/* Undo the mode claim if this would have been the 1st rule. */
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (auditing) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}
1178 
/*
 * Service an SPD_DELETERULE request against policy head "iph".  The
 * target rule is named either by its kernel-assigned index
 * (spd_rule_index != 0) or by its selector key plus direction flags.
 * On a tunnel head (itp != NULL), the tunnel's per-head flags are
 * cleared once the last rule is removed.  Replies via spd_echo() on
 * success or spdsock_error() on failure, auditing either way when
 * auditing is enabled.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (auditing) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete by kernel-assigned index. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Delete by selector; direction flags pick the side(s). */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/* Head now empty?  Clear the matching tunnel flags. */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (auditing) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (auditing) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}
1281 
/* Do NOT consume a reference to itp. */
/*
 * itp_walk() callback: atomically exchange a tunnel's active and
 * inactive policy heads (and the corresponding flag bits) under the
 * tunnel's lock.
 */
/* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
	/* SPD_FLIP is worth a tunnel MTU check. */
	update_iptun_policy(itp);
}
1294 
/*
 * Service an SPD_FLIP request: exchange active and inactive policy.
 * With no tunnel-name extension, the global policy is swapped.  An
 * empty tunnel name swaps the global policy AND every tunnel polhead.
 * A specific tunnel name flips just that tunnel, or fails with ESRCH
 * if no such tunnel policy exists.  Each step is audited when
 * auditing is enabled; the request is echoed back on success.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint32_t auditing = AU_AUDITING();

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Empty name: global policy plus all tunnels. */
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			itp_walk(spdsock_flip_node, NULL, ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (auditing) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					/*
					 * NOTE(review): itp is NULL here,
					 * so ITP_NAME(itp) presumably
					 * yields NULL -- confirm the macro
					 * is NULL-safe.
					 */
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, ns);
			if (auditing) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			/* Drop the hold taken by get_tunnel_policy(). */
			ITP_REFRELE(itp, ns);
		}
	} else {
		/* No tunnel-name extension: global policy only. */
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (auditing) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}
1383 
/*
 * Unimplemented feature
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	/* SPD_LOOKUP is not supported; always report EINVAL. */
	spdsock_error(q, mp, EINVAL, 0);
}
1394 
1395 
1396 static mblk_t *
1397 spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
1398     uint32_t count, uint16_t error)
1399 {
1400 	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
1401 	spd_msg_t *msg;
1402 	spd_ruleset_ext_t *ruleset;
1403 	mblk_t *m = allocb(len, BPRI_HI);
1404 
1405 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1406 
1407 	if (m == NULL) {
1408 		return (NULL);
1409 	}
1410 	msg = (spd_msg_t *)m->b_rptr;
1411 	ruleset = (spd_ruleset_ext_t *)(&msg[1]);
1412 
1413 	m->b_wptr = (uint8_t *)&ruleset[1];
1414 
1415 	*msg = *(spd_msg_t *)(req->b_rptr);
1416 	msg->spd_msg_len = SPD_8TO64(len);
1417 	msg->spd_msg_errno = error;
1418 
1419 	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
1420 	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
1421 	ruleset->spd_ruleset_count = count;
1422 	ruleset->spd_ruleset_version = iph->iph_gen;
1423 	return (m);
1424 }
1425 
1426 static mblk_t *
1427 spdsock_dump_finish(spdsock_t *ss, int error)
1428 {
1429 	mblk_t *m;
1430 	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
1431 	mblk_t *req = ss->spdsock_dump_req;
1432 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
1433 
1434 	rw_enter(&iph->iph_lock, RW_READER);
1435 	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
1436 	rw_exit(&iph->iph_lock);
1437 	IPPH_REFRELE(iph, ns);
1438 	if (ss->spdsock_itp != NULL) {
1439 		ITP_REFRELE(ss->spdsock_itp, ns);
1440 		ss->spdsock_itp = NULL;
1441 	}
1442 	ss->spdsock_dump_req = NULL;
1443 	freemsg(req);
1444 
1445 	return (m);
1446 }
1447 
1448 /*
1449  * Rule encoding functions.
1450  * We do a two-pass encode.
1451  * If base != NULL, fill in encoded rule part starting at base+offset.
1452  * Always return "offset" plus length of to-be-encoded data.
1453  */
1454 static uint_t
1455 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1456     uint8_t type_end, uint8_t code, uint8_t code_end)
1457 {
1458 	struct spd_typecode *tcp;
1459 
1460 	ASSERT(ALIGNED64(offset));
1461 
1462 	if (base != NULL) {
1463 		tcp = (struct spd_typecode *)(base + offset);
1464 		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1465 		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1466 		tcp->spd_typecode_code = code;
1467 		tcp->spd_typecode_type = type;
1468 		tcp->spd_typecode_type_end = type_end;
1469 		tcp->spd_typecode_code_end = code_end;
1470 	}
1471 	offset += sizeof (*tcp);
1472 
1473 	ASSERT(ALIGNED64(offset));
1474 
1475 	return (offset);
1476 }
1477 
1478 static uint_t
1479 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1480 {
1481 	struct spd_proto *spp;
1482 
1483 	ASSERT(ALIGNED64(offset));
1484 
1485 	if (base != NULL) {
1486 		spp = (struct spd_proto *)(base + offset);
1487 		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1488 		spp->spd_proto_exttype = SPD_EXT_PROTO;
1489 		spp->spd_proto_number = proto;
1490 		spp->spd_proto_reserved1 = 0;
1491 		spp->spd_proto_reserved2 = 0;
1492 	}
1493 	offset += sizeof (*spp);
1494 
1495 	ASSERT(ALIGNED64(offset));
1496 
1497 	return (offset);
1498 }
1499 
1500 static uint_t
1501 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1502 {
1503 	struct spd_portrange *spp;
1504 
1505 	ASSERT(ALIGNED64(offset));
1506 
1507 	if (base != NULL) {
1508 		spp = (struct spd_portrange *)(base + offset);
1509 		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1510 		spp->spd_ports_exttype = ext;
1511 		spp->spd_ports_minport = port;
1512 		spp->spd_ports_maxport = port;
1513 	}
1514 	offset += sizeof (*spp);
1515 
1516 	ASSERT(ALIGNED64(offset));
1517 
1518 	return (offset);
1519 }
1520 
1521 static uint_t
1522 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1523     const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1524 {
1525 	struct spd_address *sae;
1526 	ipsec_addr_t *spdaddr;
1527 	uint_t start = offset;
1528 	uint_t addrlen;
1529 	uint_t af;
1530 
1531 	if (sel->ipsl_valid & IPSL_IPV4) {
1532 		af = AF_INET;
1533 		addrlen = IP_ADDR_LEN;
1534 	} else {
1535 		af = AF_INET6;
1536 		addrlen = IPV6_ADDR_LEN;
1537 	}
1538 
1539 	ASSERT(ALIGNED64(offset));
1540 
1541 	if (base != NULL) {
1542 		sae = (struct spd_address *)(base + offset);
1543 		sae->spd_address_exttype = ext;
1544 		sae->spd_address_af = af;
1545 		sae->spd_address_prefixlen = pfxlen;
1546 		sae->spd_address_reserved2 = 0;
1547 
1548 		spdaddr = (ipsec_addr_t *)(&sae[1]);
1549 		bcopy(addr, spdaddr, addrlen);
1550 	}
1551 	offset += sizeof (*sae);
1552 	addrlen = roundup(addrlen, sizeof (uint64_t));
1553 	offset += addrlen;
1554 
1555 	ASSERT(ALIGNED64(offset));
1556 
1557 	if (base != NULL)
1558 		sae->spd_address_len = SPD_8TO64(offset - start);
1559 	return (offset);
1560 }
1561 
1562 static uint_t
1563 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1564 {
1565 	const ipsec_selkey_t *selkey = &sel->ipsl_key;
1566 
1567 	if (selkey->ipsl_valid & IPSL_PROTOCOL)
1568 		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1569 	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1570 		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1571 		    selkey->ipsl_lport);
1572 	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1573 		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1574 		    selkey->ipsl_rport);
1575 	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1576 		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1577 		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1578 	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1579 		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1580 		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1581 	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1582 		offset = spdsock_encode_typecode(base, offset,
1583 		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1584 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1585 		    selkey->ipsl_icmp_code : 255,
1586 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1587 		    selkey->ipsl_icmp_code_end : 255);
1588 	}
1589 	return (offset);
1590 }
1591 
1592 static uint_t
1593 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1594     uint32_t value)
1595 {
1596 	struct spd_attribute *attr;
1597 
1598 	ASSERT(ALIGNED64(offset));
1599 
1600 	if (base != NULL) {
1601 		attr = (struct spd_attribute *)(base + offset);
1602 		attr->spd_attr_tag = tag;
1603 		attr->spd_attr_value = value;
1604 	}
1605 	offset += sizeof (struct spd_attribute);
1606 
1607 	ASSERT(ALIGNED64(offset));
1608 
1609 	return (offset);
1610 }
1611 
1612 
1613 #define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1614 
1615 static uint_t
1616 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1617 {
1618 	const struct ipsec_act *act = &(ap->ipa_act);
1619 	uint_t flags;
1620 
1621 	EMIT(SPD_ATTR_EMPTY, 0);
1622 	switch (act->ipa_type) {
1623 	case IPSEC_ACT_DISCARD:
1624 	case IPSEC_ACT_REJECT:
1625 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1626 		break;
1627 	case IPSEC_ACT_BYPASS:
1628 	case IPSEC_ACT_CLEAR:
1629 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1630 		break;
1631 
1632 	case IPSEC_ACT_APPLY:
1633 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1634 		flags = 0;
1635 		if (act->ipa_apply.ipp_use_ah)
1636 			flags |= SPD_APPLY_AH;
1637 		if (act->ipa_apply.ipp_use_esp)
1638 			flags |= SPD_APPLY_ESP;
1639 		if (act->ipa_apply.ipp_use_espa)
1640 			flags |= SPD_APPLY_ESPA;
1641 		if (act->ipa_apply.ipp_use_se)
1642 			flags |= SPD_APPLY_SE;
1643 		if (act->ipa_apply.ipp_use_unique)
1644 			flags |= SPD_APPLY_UNIQUE;
1645 		EMIT(SPD_ATTR_FLAGS, flags);
1646 		if (flags & SPD_APPLY_AH) {
1647 			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1648 			EMIT(SPD_ATTR_AH_MINBITS,
1649 			    act->ipa_apply.ipp_ah_minbits);
1650 			EMIT(SPD_ATTR_AH_MAXBITS,
1651 			    act->ipa_apply.ipp_ah_maxbits);
1652 		}
1653 		if (flags & SPD_APPLY_ESP) {
1654 			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1655 			EMIT(SPD_ATTR_ENCR_MINBITS,
1656 			    act->ipa_apply.ipp_espe_minbits);
1657 			EMIT(SPD_ATTR_ENCR_MAXBITS,
1658 			    act->ipa_apply.ipp_espe_maxbits);
1659 			if (flags & SPD_APPLY_ESPA) {
1660 				EMIT(SPD_ATTR_ESP_AUTH,
1661 				    act->ipa_apply.ipp_esp_auth_alg);
1662 				EMIT(SPD_ATTR_ESPA_MINBITS,
1663 				    act->ipa_apply.ipp_espa_minbits);
1664 				EMIT(SPD_ATTR_ESPA_MAXBITS,
1665 				    act->ipa_apply.ipp_espa_maxbits);
1666 			}
1667 		}
1668 		if (act->ipa_apply.ipp_km_proto != 0)
1669 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1670 		if (act->ipa_apply.ipp_km_cookie != 0)
1671 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1672 		if (act->ipa_apply.ipp_replay_depth != 0)
1673 			EMIT(SPD_ATTR_REPLAY_DEPTH,
1674 			    act->ipa_apply.ipp_replay_depth);
1675 		/* Add more here */
1676 		break;
1677 	}
1678 
1679 	return (offset);
1680 }
1681 
1682 static uint_t
1683 spdsock_encode_action_list(uint8_t *base, uint_t offset,
1684     const ipsec_action_t *ap)
1685 {
1686 	struct spd_ext_actions *act;
1687 	uint_t nact = 0;
1688 	uint_t start = offset;
1689 
1690 	ASSERT(ALIGNED64(offset));
1691 
1692 	if (base != NULL) {
1693 		act = (struct spd_ext_actions *)(base + offset);
1694 		act->spd_actions_len = 0;
1695 		act->spd_actions_exttype = SPD_EXT_ACTION;
1696 		act->spd_actions_count = 0;
1697 		act->spd_actions_reserved = 0;
1698 	}
1699 
1700 	offset += sizeof (*act);
1701 
1702 	ASSERT(ALIGNED64(offset));
1703 
1704 	while (ap != NULL) {
1705 		offset = spdsock_encode_action(base, offset, ap);
1706 		ap = ap->ipa_next;
1707 		nact++;
1708 		if (ap != NULL) {
1709 			EMIT(SPD_ATTR_NEXT, 0);
1710 		}
1711 	}
1712 	EMIT(SPD_ATTR_END, 0);
1713 
1714 	ASSERT(ALIGNED64(offset));
1715 
1716 	if (base != NULL) {
1717 		act->spd_actions_count = nact;
1718 		act->spd_actions_len = SPD_8TO64(offset - start);
1719 	}
1720 
1721 	return (offset);
1722 }
1723 
1724 #undef EMIT
1725 
1726 /* ARGSUSED */
1727 static uint_t
1728 spdsock_rule_flags(uint_t dir, uint_t af)
1729 {
1730 	uint_t flags = 0;
1731 
1732 	if (dir == IPSEC_TYPE_INBOUND)
1733 		flags |= SPD_RULE_FLAG_INBOUND;
1734 	if (dir == IPSEC_TYPE_OUTBOUND)
1735 		flags |= SPD_RULE_FLAG_OUTBOUND;
1736 
1737 	return (flags);
1738 }
1739 
1740 
/*
 * Encode one policy rule as a complete SPD_DUMP message: spd_msg
 * header, spd_rule extension, optional tunnel-name extension, then
 * the selector and action extensions.  Two-pass: base == NULL only
 * sizes the record; otherwise it is written at base+offset.  Returns
 * the updated offset.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		/* Echo seq/pid from the request so the client can match. */
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		/*
		 * NOTE(review): the "- 4" assumes sizeof (spd_if_t)
		 * already includes 4 name bytes, and that
		 * strlen(name) >= 4 (otherwise the size_t subtraction
		 * underflows) -- confirm against net/pfpolicy.h.
		 */
		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Back-patch the total message length. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}
1809 
1810 /* ARGSUSED */
1811 static mblk_t *
1812 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1813     uint_t dir, uint_t af, char *name, boolean_t tunnel)
1814 {
1815 	mblk_t *m;
1816 	uint_t len;
1817 	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1818 
1819 	/*
1820 	 * Figure out how much space we'll need.
1821 	 */
1822 	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1823 	    tunnel);
1824 
1825 	/*
1826 	 * Allocate mblk.
1827 	 */
1828 	m = allocb(len, BPRI_HI);
1829 	if (m == NULL)
1830 		return (NULL);
1831 
1832 	/*
1833 	 * Fill it in..
1834 	 */
1835 	m->b_wptr = m->b_rptr + len;
1836 	bzero(m->b_rptr, len);
1837 	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1838 	    name, tunnel);
1839 	return (m);
1840 }
1841 
1842 static ipsec_policy_t *
1843 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
1844     ipsec_policy_t *cur)
1845 {
1846 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1847 
1848 	ss->spdsock_dump_count++;
1849 	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
1850 	return (cur);
1851 }
1852 
/*
 * Return the next rule in the dump walk of "iph", or NULL once this
 * head is exhausted.  Per policy type, all hash chains are visited
 * first, then the per-AF non-hashed rule lists; the position is kept
 * in the spdsock_dump_cur_* fields so the walk can be resumed across
 * calls.  Caller holds iph->iph_lock as reader.
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	/* Still in the middle of a chain?  Just keep going. */
	cur = ss->spdsock_dump_cur_rule;

	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	/* Scan remaining hash chains for a non-empty one. */
	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	/* Hash chains done; scan the per-AF non-hashed lists. */
	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	/* This type is exhausted; move to the next one, if any. */
	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}
1904 
/*
 * If we're done with one policy head, but have more to go, we iterate through
 * another IPsec tunnel policy head (itp).  Return NULL if it is an error
 * worthy of returning EAGAIN via PF_POLICY.
 */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	/* Hold the next node before releasing the previous one. */
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}
1935 
/*
 * Produce the next encoded rule record for an in-progress dump.  On
 * completion, generation mismatch (EAGAIN), or allocation failure
 * (ENOMEM), a dump-termination record from spdsock_dump_finish() is
 * returned instead (which may itself be NULL on allocation failure).
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Policy changed mid-dump; tell the client to retry. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));


		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE:  No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}
2012 
2013 /*
2014  * Dump records until we run into flow-control back-pressure.
2015  */
2016 static void
2017 spdsock_dump_some(queue_t *q, spdsock_t *ss)
2018 {
2019 	mblk_t *m, *dataind;
2020 
2021 	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
2022 		m = spdsock_dump_next_record(ss);
2023 		if (m == NULL)
2024 			return;
2025 		dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
2026 		if (dataind == NULL) {
2027 			freemsg(m);
2028 			return;
2029 		}
2030 		dataind->b_cont = m;
2031 		dataind->b_wptr += sizeof (struct T_data_req);
2032 		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
2033 		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
2034 		dataind->b_datap->db_type = M_PROTO;
2035 		putnext(q, dataind);
2036 	}
2037 }
2038 
/*
 * Start dumping.
 * Format a start-of-dump record, and set up the stream and kick the rsrv
 * procedure to continue the job..
 */
/* ARGSUSED */
static void
spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	mblk_t *mr;

	/* spdsock_open() already set spdsock_itp to NULL. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		/*
		 * "All polheads" dump: the global head plus one per
		 * tunnel.  Snapshot the tunnel-table generation so a
		 * concurrent change can be detected (EAGAIN later).
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Start-of-dump record: count 0, errno 0. */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	/* Let the read-side service routine stream out the records. */
	qenable(OTHERQ(q));
}
2090 
/* Do NOT consume a reference to ITP. */
/*
 * itp_walk() callback: copy a tunnel's active policy head into its
 * inactive head under the tunnel's lock.  "ep" points at a shared
 * error accumulator; once any node has failed, the remaining walk
 * becomes a no-op.
 */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason. */
	mutex_enter(&itp->itp_lock);
	ITPF_CLONE(itp->itp_flags);
	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
2104 
2105 void
2106 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2107 {
2108 	int error;
2109 	char *tname;
2110 	ipsec_tun_pol_t *itp;
2111 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2112 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
2113 	uint32_t auditing = AU_AUDITING();
2114 
2115 	if (tunname != NULL) {
2116 		tname = (char *)tunname->spd_if_name;
2117 		if (*tname == '\0') {
2118 			error = ipsec_clone_system_policy(ns);
2119 			if (auditing) {
2120 				boolean_t active;
2121 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2122 				cred_t *cr;
2123 				pid_t cpid;
2124 
2125 				cr = msg_getcred(mp, &cpid);
2126 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2127 				audit_pf_policy(SPD_CLONE, cr, ns,
2128 				    NULL, active, error, cpid);
2129 			}
2130 			if (error == 0) {
2131 				itp_walk(spdsock_clone_node, &error, ns);
2132 				if (auditing) {
2133 					boolean_t active;
2134 					spd_msg_t *spmsg =
2135 					    (spd_msg_t *)mp->b_rptr;
2136 					cred_t *cr;
2137 					pid_t cpid;
2138 
2139 					cr = msg_getcred(mp, &cpid);
2140 					active = (spmsg->spd_msg_spdid ==
2141 					    SPD_ACTIVE);
2142 					audit_pf_policy(SPD_CLONE, cr,
2143 					    ns, "all tunnels", active, 0,
2144 					    cpid);
2145 				}
2146 			}
2147 		} else {
2148 			itp = get_tunnel_policy(tname, ns);
2149 			if (itp == NULL) {
2150 				spdsock_error(q, mp, ENOENT, 0);
2151 				if (auditing) {
2152 					boolean_t active;
2153 					spd_msg_t *spmsg =
2154 					    (spd_msg_t *)mp->b_rptr;
2155 					cred_t *cr;
2156 					pid_t cpid;
2157 
2158 					cr = msg_getcred(mp, &cpid);
2159 					active = (spmsg->spd_msg_spdid ==
2160 					    SPD_ACTIVE);
2161 					audit_pf_policy(SPD_CLONE, cr,
2162 					    ns, NULL, active, ENOENT, cpid);
2163 				}
2164 				return;
2165 			}
2166 			spdsock_clone_node(itp, &error, NULL);
2167 			if (auditing) {
2168 				boolean_t active;
2169 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2170 				cred_t *cr;
2171 				pid_t cpid;
2172 
2173 				cr = msg_getcred(mp, &cpid);
2174 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2175 				audit_pf_policy(SPD_CLONE, cr, ns,
2176 				    ITP_NAME(itp), active, error, cpid);
2177 			}
2178 			ITP_REFRELE(itp, ns);
2179 		}
2180 	} else {
2181 		error = ipsec_clone_system_policy(ns);
2182 		if (auditing) {
2183 			boolean_t active;
2184 			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2185 			cred_t *cr;
2186 			pid_t cpid;
2187 
2188 			cr = msg_getcred(mp, &cpid);
2189 			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2190 			audit_pf_policy(SPD_CLONE, cr, ns, NULL,
2191 			    active, error, cpid);
2192 		}
2193 	}
2194 
2195 	if (error != 0)
2196 		spdsock_error(q, mp, error, 0);
2197 	else
2198 		spd_echo(q, mp);
2199 }
2200 
2201 /*
2202  * Process a SPD_ALGLIST request. The caller expects separate alg entries
2203  * for AH authentication, ESP authentication, and ESP encryption.
2204  * The same distinction is then used when setting the min and max key
2205  * sizes when defining policies.
2206  */
2207 
/* Pseudo algorithm-type indices used to select rows in the tables below. */
#define	SPDSOCK_AH_AUTH		0
#define	SPDSOCK_ESP_AUTH	1
#define	SPDSOCK_ESP_ENCR	2
#define	SPDSOCK_NTYPES		3

/* Algorithm-identifier attribute tag, per pseudo algorithm type. */
static const uint_t algattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_AUTH,
	SPD_ATTR_ESP_AUTH,
	SPD_ATTR_ESP_ENCR
};
/* Minimum key-bits attribute tag, per pseudo algorithm type. */
static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MINBITS,
	SPD_ATTR_ESPA_MINBITS,
	SPD_ATTR_ENCR_MINBITS
};
/* Maximum key-bits attribute tag, per pseudo algorithm type. */
static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MAXBITS,
	SPD_ATTR_ESPA_MAXBITS,
	SPD_ATTR_ENCR_MAXBITS
};
/* Default key-bits attribute tag, per pseudo algorithm type. */
static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_DEFBITS,
	SPD_ATTR_ESPA_DEFBITS,
	SPD_ATTR_ENCR_DEFBITS
};
/* Key-bits increment attribute tag, per pseudo algorithm type. */
static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_INCRBITS,
	SPD_ATTR_ESPA_INCRBITS,
	SPD_ATTR_ENCR_INCRBITS
};

#define	ATTRPERALG	6	/* fixed attributes per algs */
2240 
2241 void
2242 spdsock_alglist(queue_t *q, mblk_t *mp)
2243 {
2244 	uint_t algtype;
2245 	uint_t algidx;
2246 	uint_t algcount;
2247 	uint_t size;
2248 	mblk_t *m;
2249 	uint8_t *cur;
2250 	spd_msg_t *msg;
2251 	struct spd_ext_actions *act;
2252 	struct spd_attribute *attr;
2253 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2254 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2255 
2256 	mutex_enter(&ipss->ipsec_alg_lock);
2257 	/*
2258 	 * The SPD client expects to receive separate entries for
2259 	 * AH authentication and ESP authentication supported algorithms.
2260 	 *
2261 	 * Don't return the "any" algorithms, if defined, as no
2262 	 * kernel policies can be set for these algorithms.
2263 	 */
2264 	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2265 	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2266 
2267 	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
2268 		algcount--;
2269 	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
2270 		algcount--;
2271 
2272 	/*
2273 	 * For each algorithm, we encode:
2274 	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2275 	 */
2276 
2277 	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
2278 	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;
2279 
2280 	ASSERT(ALIGNED64(size));
2281 
2282 	m = allocb(size, BPRI_HI);
2283 	if (m == NULL) {
2284 		mutex_exit(&ipss->ipsec_alg_lock);
2285 		spdsock_error(q, mp, ENOMEM, 0);
2286 		return;
2287 	}
2288 
2289 	m->b_wptr = m->b_rptr + size;
2290 	cur = m->b_rptr;
2291 
2292 	msg = (spd_msg_t *)cur;
2293 	bcopy(mp->b_rptr, cur, sizeof (*msg));
2294 
2295 	msg->spd_msg_len = SPD_8TO64(size);
2296 	msg->spd_msg_errno = 0;
2297 	msg->spd_msg_diagnostic = 0;
2298 
2299 	cur += sizeof (*msg);
2300 
2301 	act = (struct spd_ext_actions *)cur;
2302 	cur += sizeof (*act);
2303 
2304 	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2305 	act->spd_actions_exttype = SPD_EXT_ACTION;
2306 	act->spd_actions_count = algcount;
2307 	act->spd_actions_reserved = 0;
2308 
2309 	attr = (struct spd_attribute *)cur;
2310 
2311 #define	EMIT(tag, value) {					\
2312 		attr->spd_attr_tag = (tag); 			\
2313 		attr->spd_attr_value = (value); 		\
2314 		attr++;			  			\
2315 	}
2316 
2317 	/*
2318 	 * If you change the number of EMIT's here, change
2319 	 * ATTRPERALG above to match
2320 	 */
2321 #define	EMITALGATTRS(_type) {					\
2322 		EMIT(algattr[_type], algid); 		/* 1 */	\
2323 		EMIT(minbitsattr[_type], minbits);	/* 2 */	\
2324 		EMIT(maxbitsattr[_type], maxbits);	/* 3 */	\
2325 		EMIT(defbitsattr[_type], defbits);	/* 4 */	\
2326 		EMIT(incrbitsattr[_type], incr);	/* 5 */	\
2327 		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */	\
2328 	}
2329 
2330 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2331 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2332 		    algidx++) {
2333 			int algid = ipss->ipsec_sortlist[algtype][algidx];
2334 			ipsec_alginfo_t *alg =
2335 			    ipss->ipsec_alglists[algtype][algid];
2336 			uint_t minbits = alg->alg_minbits;
2337 			uint_t maxbits = alg->alg_maxbits;
2338 			uint_t defbits = alg->alg_default_bits;
2339 			uint_t incr = alg->alg_increment;
2340 
2341 			if (algtype == IPSEC_ALG_AUTH) {
2342 				if (algid == SADB_AALG_NONE)
2343 					continue;
2344 				EMITALGATTRS(SPDSOCK_AH_AUTH);
2345 				EMITALGATTRS(SPDSOCK_ESP_AUTH);
2346 			} else {
2347 				if (algid == SADB_EALG_NONE)
2348 					continue;
2349 				ASSERT(algtype == IPSEC_ALG_ENCR);
2350 				EMITALGATTRS(SPDSOCK_ESP_ENCR);
2351 			}
2352 		}
2353 	}
2354 
2355 	mutex_exit(&ipss->ipsec_alg_lock);
2356 
2357 #undef EMITALGATTRS
2358 #undef EMIT
2359 #undef ATTRPERALG
2360 
2361 	attr--;
2362 	attr->spd_attr_tag = SPD_ATTR_END;
2363 
2364 	freemsg(mp);
2365 	qreply(q, m);
2366 }
2367 
2368 /*
2369  * Process a SPD_DUMPALGS request.
2370  */
2371 
2372 #define	ATTRPERALG	9	/* fixed attributes per algs */
2373 
2374 void
2375 spdsock_dumpalgs(queue_t *q, mblk_t *mp)
2376 {
2377 	uint_t algtype;
2378 	uint_t algidx;
2379 	uint_t size;
2380 	mblk_t *m;
2381 	uint8_t *cur;
2382 	spd_msg_t *msg;
2383 	struct spd_ext_actions *act;
2384 	struct spd_attribute *attr;
2385 	ipsec_alginfo_t *alg;
2386 	uint_t algid;
2387 	uint_t i;
2388 	uint_t alg_size;
2389 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2390 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2391 
2392 	mutex_enter(&ipss->ipsec_alg_lock);
2393 
2394 	/*
2395 	 * For each algorithm, we encode:
2396 	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2397 	 *
2398 	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
2399 	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_NPARAMS / ALG_PARAMS* /
2400 	 * ALG_MECHNAME / ALG_FLAGS / {END, NEXT}
2401 	 */
2402 
2403 	/*
2404 	 * Compute the size of the SPD message.
2405 	 */
2406 	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);
2407 
2408 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2409 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2410 		    algidx++) {
2411 			algid = ipss->ipsec_sortlist[algtype][algidx];
2412 			alg = ipss->ipsec_alglists[algtype][algid];
2413 			alg_size = sizeof (struct spd_attribute) *
2414 			    (ATTRPERALG + alg->alg_nkey_sizes +
2415 			    alg->alg_nblock_sizes + alg->alg_nparams) +
2416 			    CRYPTO_MAX_MECH_NAME;
2417 			size += alg_size;
2418 		}
2419 	}
2420 
2421 	ASSERT(ALIGNED64(size));
2422 
2423 	m = allocb(size, BPRI_HI);
2424 	if (m == NULL) {
2425 		mutex_exit(&ipss->ipsec_alg_lock);
2426 		spdsock_error(q, mp, ENOMEM, 0);
2427 		return;
2428 	}
2429 
2430 	m->b_wptr = m->b_rptr + size;
2431 	cur = m->b_rptr;
2432 
2433 	msg = (spd_msg_t *)cur;
2434 	bcopy(mp->b_rptr, cur, sizeof (*msg));
2435 
2436 	msg->spd_msg_len = SPD_8TO64(size);
2437 	msg->spd_msg_errno = 0;
2438 	msg->spd_msg_type = SPD_ALGLIST;
2439 
2440 	msg->spd_msg_diagnostic = 0;
2441 
2442 	cur += sizeof (*msg);
2443 
2444 	act = (struct spd_ext_actions *)cur;
2445 	cur += sizeof (*act);
2446 
2447 	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2448 	act->spd_actions_exttype = SPD_EXT_ACTION;
2449 	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2450 	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2451 	act->spd_actions_reserved = 0;
2452 
2453 	/*
2454 	 * If there aren't any algorithms registered, return an empty message.
2455 	 * spdsock_get_ext() knows how to deal with this.
2456 	 */
2457 	if (act->spd_actions_count == 0) {
2458 		act->spd_actions_len = 0;
2459 		mutex_exit(&ipss->ipsec_alg_lock);
2460 		goto error;
2461 	}
2462 
2463 	attr = (struct spd_attribute *)cur;
2464 
2465 #define	EMIT(tag, value) {					\
2466 		attr->spd_attr_tag = (tag); 			\
2467 		attr->spd_attr_value = (value); 		\
2468 		attr++;			  			\
2469 	}
2470 
2471 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2472 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2473 		    algidx++) {
2474 
2475 			algid = ipss->ipsec_sortlist[algtype][algidx];
2476 			alg = ipss->ipsec_alglists[algtype][algid];
2477 
2478 			/*
2479 			 * If you change the number of EMIT's here, change
2480 			 * ATTRPERALG above to match
2481 			 */
2482 			EMIT(SPD_ATTR_ALG_ID, algid);
2483 			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
2484 			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
2485 			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
2486 			for (i = 0; i < alg->alg_nkey_sizes; i++)
2487 				EMIT(SPD_ATTR_ALG_KEYSIZE,
2488 				    alg->alg_key_sizes[i]);
2489 
2490 			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
2491 			for (i = 0; i < alg->alg_nblock_sizes; i++)
2492 				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
2493 				    alg->alg_block_sizes[i]);
2494 
2495 			EMIT(SPD_ATTR_ALG_NPARAMS, alg->alg_nparams);
2496 			for (i = 0; i < alg->alg_nparams; i++)
2497 				EMIT(SPD_ATTR_ALG_PARAMS,
2498 				    alg->alg_params[i]);
2499 
2500 			EMIT(SPD_ATTR_ALG_FLAGS, alg->alg_flags);
2501 
2502 			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
2503 			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
2504 			attr = (struct spd_attribute *)((char *)attr +
2505 			    CRYPTO_MAX_MECH_NAME);
2506 
2507 			EMIT(SPD_ATTR_NEXT, 0);
2508 		}
2509 	}
2510 
2511 	mutex_exit(&ipss->ipsec_alg_lock);
2512 
2513 #undef EMITALGATTRS
2514 #undef EMIT
2515 #undef ATTRPERALG
2516 
2517 	attr--;
2518 	attr->spd_attr_tag = SPD_ATTR_END;
2519 
2520 error:
2521 	freemsg(mp);
2522 	qreply(q, m);
2523 }
2524 
2525 /*
2526  * Do the actual work of processing an SPD_UPDATEALGS request. Can
2527  * be invoked either once IPsec is loaded on a cached request, or
2528  * when a request is received while IPsec is loaded.
2529  */
2530 static int
2531 spdsock_do_updatealg(spd_ext_t *extv[], spd_stack_t *spds)
2532 {
2533 	struct spd_ext_actions *actp;
2534 	struct spd_attribute *attr, *endattr;
2535 	uint64_t *start, *end;
2536 	ipsec_alginfo_t *alg = NULL;
2537 	ipsec_algtype_t alg_type = 0;
2538 	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
2539 	uint_t i, cur_key, cur_block, algid;
2540 	int diag = -1;
2541 
2542 	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
2543 
2544 	/* parse the message, building the list of algorithms */
2545 
2546 	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
2547 	if (actp == NULL)
2548 		return (SPD_DIAGNOSTIC_NO_ACTION_EXT);
2549 
2550 	start = (uint64_t *)actp;
2551 	end = (start + actp->spd_actions_len);
2552 	endattr = (struct spd_attribute *)end;
2553 	attr = (struct spd_attribute *)&actp[1];
2554 
2555 	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
2556 	    sizeof (ipsec_alginfo_t *));
2557 
2558 	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2559 
2560 #define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
2561 #define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
2562 #define	ALG_PARAM_SIZES(a) (((a)->alg_nparams + 1) * sizeof (uint16_t))
2563 
2564 	while (attr < endattr) {
2565 		switch (attr->spd_attr_tag) {
2566 		case SPD_ATTR_NOP:
2567 		case SPD_ATTR_EMPTY:
2568 			break;
2569 		case SPD_ATTR_END:
2570 			attr = endattr;
2571 			/* FALLTHRU */
2572 		case SPD_ATTR_NEXT:
2573 			if (doing_proto) {
2574 				doing_proto = B_FALSE;
2575 				break;
2576 			}
2577 			if (skip_alg) {
2578 				ipsec_alg_free(alg);
2579 			} else {
2580 				ipsec_alg_free(
2581 				    spds->spds_algs[alg_type][alg->alg_id]);
2582 				spds->spds_algs[alg_type][alg->alg_id] =
2583 				    alg;
2584 			}
2585 			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2586 			break;
2587 
2588 		case SPD_ATTR_ALG_ID:
2589 			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
2590 				ss1dbg(spds, ("spdsock_do_updatealg: "
2591 				    "invalid alg id %d\n",
2592 				    attr->spd_attr_value));
2593 				diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
2594 				goto bail;
2595 			}
2596 			alg->alg_id = attr->spd_attr_value;
2597 			break;
2598 
2599 		case SPD_ATTR_ALG_PROTO:
2600 			/* find the alg type */
2601 			for (i = 0; i < NALGPROTOS; i++)
2602 				if (algproto[i] == attr->spd_attr_value)
2603 					break;
2604 			skip_alg = (i == NALGPROTOS);
2605 			if (!skip_alg)
2606 				alg_type = i;
2607 			break;
2608 
2609 		case SPD_ATTR_ALG_INCRBITS:
2610 			alg->alg_increment = attr->spd_attr_value;
2611 			break;
2612 
2613 		case SPD_ATTR_ALG_NKEYSIZES:
2614 			if (alg->alg_key_sizes != NULL) {
2615 				kmem_free(alg->alg_key_sizes,
2616 				    ALG_KEY_SIZES(alg));
2617 			}
2618 			alg->alg_nkey_sizes = attr->spd_attr_value;
2619 			/*
2620 			 * Allocate room for the trailing zero key size
2621 			 * value as well.
2622 			 */
2623 			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
2624 			    KM_SLEEP);
2625 			cur_key = 0;
2626 			break;
2627 
2628 		case SPD_ATTR_ALG_KEYSIZE:
2629 			if (alg->alg_key_sizes == NULL ||
2630 			    cur_key >= alg->alg_nkey_sizes) {
2631 				ss1dbg(spds, ("spdsock_do_updatealg: "
2632 				    "too many key sizes\n"));
2633 				diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
2634 				goto bail;
2635 			}
2636 			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
2637 			break;
2638 
2639 		case SPD_ATTR_ALG_FLAGS:
2640 			/*
2641 			 * Flags (bit mask). The alg_flags element of
2642 			 * ipsecalg_flags_t is only 8 bits wide. The
2643 			 * user can set the VALID bit, but we will ignore it
2644 			 * and make the decision is the algorithm is valid.
2645 			 */
2646 			alg->alg_flags |= (uint8_t)attr->spd_attr_value;
2647 			break;
2648 
2649 		case SPD_ATTR_ALG_NBLOCKSIZES:
2650 			if (alg->alg_block_sizes != NULL) {
2651 				kmem_free(alg->alg_block_sizes,
2652 				    ALG_BLOCK_SIZES(alg));
2653 			}
2654 			alg->alg_nblock_sizes = attr->spd_attr_value;
2655 			/*
2656 			 * Allocate room for the trailing zero block size
2657 			 * value as well.
2658 			 */
2659 			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
2660 			    KM_SLEEP);
2661 			cur_block = 0;
2662 			break;
2663 
2664 		case SPD_ATTR_ALG_BLOCKSIZE:
2665 			if (alg->alg_block_sizes == NULL ||
2666 			    cur_block >= alg->alg_nblock_sizes) {
2667 				ss1dbg(spds, ("spdsock_do_updatealg: "
2668 				    "too many block sizes\n"));
2669 				diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2670 				goto bail;
2671 			}
2672 			alg->alg_block_sizes[cur_block++] =
2673 			    attr->spd_attr_value;
2674 			break;
2675 
2676 		case SPD_ATTR_ALG_NPARAMS:
2677 			if (alg->alg_params != NULL) {
2678 				kmem_free(alg->alg_params,
2679 				    ALG_PARAM_SIZES(alg));
2680 			}
2681 			alg->alg_nparams = attr->spd_attr_value;
2682 			/*
2683 			 * Allocate room for the trailing zero block size
2684 			 * value as well.
2685 			 */
2686 			alg->alg_params = kmem_zalloc(ALG_PARAM_SIZES(alg),
2687 			    KM_SLEEP);
2688 			cur_block = 0;
2689 			break;
2690 
2691 		case SPD_ATTR_ALG_PARAMS:
2692 			if (alg->alg_params == NULL ||
2693 			    cur_block >= alg->alg_nparams) {
2694 				ss1dbg(spds, ("spdsock_do_updatealg: "
2695 				    "too many params\n"));
2696 				diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2697 				goto bail;
2698 			}
2699 			/*
2700 			 * Array contains: iv_len, icv_len, salt_len
2701 			 * Any additional parameters are currently ignored.
2702 			 */
2703 			alg->alg_params[cur_block++] =
2704 			    attr->spd_attr_value;
2705 			break;
2706 
2707 		case SPD_ATTR_ALG_MECHNAME: {
2708 			char *mech_name;
2709 
2710 			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
2711 				ss1dbg(spds, ("spdsock_do_updatealg: "
2712 				    "mech name too long\n"));
2713 				diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
2714 				goto bail;
2715 			}
2716 			mech_name = (char *)(attr + 1);
2717 			bcopy(mech_name, alg->alg_mech_name,
2718 			    attr->spd_attr_value);
2719 			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
2720 			attr = (struct spd_attribute *)((char *)attr +
2721 			    attr->spd_attr_value);
2722 			break;
2723 		}
2724 
2725 		case SPD_ATTR_PROTO_ID:
2726 			doing_proto = B_TRUE;
2727 			for (i = 0; i < NALGPROTOS; i++) {
2728 				if (algproto[i] == attr->spd_attr_value) {
2729 					alg_type = i;
2730 					break;
2731 				}
2732 			}
2733 			break;
2734 
2735 		case SPD_ATTR_PROTO_EXEC_MODE:
2736 			if (!doing_proto)
2737 				break;
2738 			for (i = 0; i < NEXECMODES; i++) {
2739 				if (execmodes[i] == attr->spd_attr_value) {
2740 					spds->spds_algs_exec_mode[alg_type] = i;
2741 					break;
2742 				}
2743 			}
2744 			break;
2745 		}
2746 		attr++;
2747 	}
2748 
2749 #undef	ALG_KEY_SIZES
2750 #undef	ALG_BLOCK_SIZES
2751 #undef	ALG_PARAM_SIZES
2752 
2753 	/* update the algorithm tables */
2754 	spdsock_merge_algs(spds);
2755 bail:
2756 	/* cleanup */
2757 	ipsec_alg_free(alg);
2758 	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
2759 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
2760 		if (spds->spds_algs[alg_type][algid] != NULL)
2761 			ipsec_alg_free(spds->spds_algs[alg_type][algid]);
2762 	return (diag);
2763 }
2764 
2765 /*
2766  * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2767  * the request until IPsec loads. If IPsec is loaded, act on it
2768  * immediately.
2769  */
2770 
static void
spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;
	ipsec_stack_t	*ipss = spds->spds_netstack->netstack_ipsec;
	uint32_t auditing = AU_AUDITING();

	if (!ipsec_loaded(ipss)) {
		/*
		 * IPsec is not loaded, save request and return nicely,
		 * the message will be processed once IPsec loads.
		 */
		mblk_t *new_mp;

		/* last update message wins */
		if ((new_mp = copymsg(mp)) == NULL) {
			spdsock_error(q, mp, ENOMEM, 0);
			return;
		}
		mutex_enter(&spds->spds_alg_lock);
		/*
		 * Stash the extension pointers (which point into mp) and
		 * mp itself; spdsock_update_pending_algs() replays them
		 * once IPsec finishes loading.
		 */
		bcopy(extv, spds->spds_extv_algs,
		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
		if (spds->spds_mp_algs != NULL)
			freemsg(spds->spds_mp_algs);
		spds->spds_mp_algs = mp;
		spds->spds_algs_pending = B_TRUE;
		mutex_exit(&spds->spds_alg_lock);
		if (auditing) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_UPDATEALGS, cr,
			    spds->spds_netstack, NULL, B_TRUE, EAGAIN,
			    cpid);
		}
		/* Ack with the copy; the original mp is now queued. */
		spd_echo(q, new_mp);
	} else {
		/*
		 * IPsec is loaded, act on the message immediately.
		 */
		int diag;

		mutex_enter(&spds->spds_alg_lock);
		diag = spdsock_do_updatealg(extv, spds);
		if (diag == -1) {
			/* Keep the lock held while we walk the SA tables. */
			sadb_alg_update(IPSEC_ALG_ALL, 0, 0,
			    spds->spds_netstack);
			mutex_exit(&spds->spds_alg_lock);
			spd_echo(q, mp);
			if (auditing) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, 0,
				    cpid);
			}
		} else {
			/* Parse failed; report the diagnostic code. */
			mutex_exit(&spds->spds_alg_lock);
			spdsock_diag(q, mp, diag);
			if (auditing) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, diag,
				    cpid);
			}
		}
	}
}
2847 
2848 /*
2849  * Find a tunnel instance (using the name to link ID mapping), and
2850  * update it after an IPsec change.  We need to do this always in case
2851  * we add policy AFTER plumbing a tunnel.  We also need to do this
2852  * because, as a side-effect, the tunnel's MTU is updated to reflect
2853  * any IPsec overhead in the itp's policy.
2854  */
2855 static void
2856 update_iptun_policy(ipsec_tun_pol_t *itp)
2857 {
2858 	datalink_id_t linkid;
2859 
2860 	if (dls_mgmt_get_linkid(itp->itp_name, &linkid) == 0)
2861 		iptun_set_policy(linkid, itp);
2862 }
2863 
2864 /*
2865  * Sort through the mess of polhead options to retrieve an appropriate one.
2866  * Returns NULL if we send an spdsock error.  Returns a valid pointer if we
2867  * found a valid polhead.  Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2868  * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
2869  * act on ALL policy heads.
2870  */
static ipsec_policy_head_t *
get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
    int msgtype, ipsec_tun_pol_t **itpp)
{
	ipsec_tun_pol_t *itp;
	ipsec_policy_head_t *iph;
	int errno;	/* local variable, not libc errno (none in-kernel) */
	char *tname;
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint64_t gen;	/* Placeholder */

	active = (spdid == SPD_ACTIVE);
	*itpp = NULL;
	if (!active && spdid != SPD_STANDBY) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
		return (NULL);
	}

	if (tunname != NULL) {
		/* Acting on a tunnel's SPD. */
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Handle all-polhead cases here. */
			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
				spdsock_diag(q, mp,
				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
				return (NULL);
			}
			return (active ? ALL_ACTIVE_POLHEADS :
			    ALL_INACTIVE_POLHEADS);
		}

		itp = get_tunnel_policy(tname, ns);
		if (itp == NULL) {
			if (msgtype != SPD_ADDRULE) {
				/* "Tunnel not found" */
				spdsock_error(q, mp, ENOENT, 0);
				return (NULL);
			}

			/* SPD_ADDRULE creates the tunnel polhead on demand. */
			errno = 0;
			itp = create_tunnel_policy(tname, &errno, &gen, ns);
			if (itp == NULL) {
				/*
				 * Something very bad happened, most likely
				 * ENOMEM.  Return an indicator.
				 */
				spdsock_error(q, mp, errno, 0);
				return (NULL);
			}
		}

		/* Match up the itp to an iptun instance. */
		update_iptun_policy(itp);

		/* Caller receives the itp's reference via *itpp. */
		*itpp = itp;
		/* For spdsock dump state, set the polhead's name. */
		if (msgtype == SPD_DUMP) {
			ITP_REFHOLD(itp);
			ss->spdsock_itp = itp;
			ss->spdsock_dump_tunnel = itp->itp_flags &
			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
		}
	} else {
		itp = NULL;
		/* For spdsock dump state, indicate it's global policy. */
		if (msgtype == SPD_DUMP)
			ss->spdsock_itp = NULL;
	}

	if (active)
		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
	else
		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
		    itp->itp_inactive;

	ASSERT(iph != NULL);
	/* Tunnel polheads need an explicit hold for the caller here. */
	if (itp != NULL) {
		IPPH_REFHOLD(iph);
	}

	return (iph);
}
2956 
/*
 * Parse and dispatch one PF_POLICY message from userland: validate the
 * message length and extensions, then route the request by spd_msg_type
 * according to how many policy heads the operation needs (none, both
 * active and inactive, or exactly one).
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).	It may be necessary to massage things a
		 * bit.	 For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.	Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 * NOTE(review): mp is not freed on this path --
			 * confirm who owns it when pullupmsg() fails.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/* Kick off the load and retry this mp from the timeout. */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			/* Clear the per-tunnel flags on the side we flush. */
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
		}

		spdsock_flush(q, iph, itp, mp);

		if (itp != NULL) {
			/* SPD_FLUSH is worth a tunnel MTU check. */
			update_iptun_policy(itp);
			ITP_REFRELE(itp, ns);
		}
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* Sentinel polheads are only legal for FLUSH/DUMP (handled above). */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	/* Drop the holds taken by get_appropriate_polhead(). */
	IPPH_REFRELE(iph, ns);
	if (itp != NULL) {
		/* SPD_{ADD,DELETE}RULE are worth a tunnel MTU check. */
		if (spmsg->spd_msg_type == SPD_ADDRULE ||
		    spmsg->spd_msg_type == SPD_DELETERULE)
			update_iptun_policy(itp);
		ITP_REFRELE(itp, ns);
	}
}
3144 
3145 /*
3146  * If an algorithm mapping was received before IPsec was loaded, process it.
3147  * Called from the IPsec loader.
3148  */
void
spdsock_update_pending_algs(netstack_t *ns)
{
	spd_stack_t *spds = ns->netstack_spdsock;

	mutex_enter(&spds->spds_alg_lock);
	if (spds->spds_algs_pending) {
		/* Replay the request cached by spdsock_updatealg(). */
		(void) spdsock_do_updatealg(spds->spds_extv_algs, spds);
		spds->spds_algs_pending = B_FALSE;
	}
	mutex_exit(&spds->spds_alg_lock);
}
3161 
3162 static void
3163 spdsock_loadcheck(void *arg)
3164 {
3165 	queue_t *q = (queue_t *)arg;
3166 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3167 	mblk_t *mp;
3168 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3169 
3170 	ASSERT(ss != NULL);
3171 
3172 	ss->spdsock_timeout = 0;
3173 	mp = ss->spdsock_timeout_arg;
3174 	ASSERT(mp != NULL);
3175 	ss->spdsock_timeout_arg = NULL;
3176 	if (ipsec_failed(ipss))
3177 		spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3178 	else
3179 		spdsock_parse(q, mp);
3180 }
3181 
3182 /*
3183  * Copy relevant state bits.
3184  */
3185 static void
3186 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3187 {
3188 	*tap = spdsock_g_t_info_ack;
3189 	tap->CURRENT_state = ss->spdsock_state;
3190 	tap->OPT_size = spdsock_max_optsize;
3191 }
3192 
3193 /*
3194  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
3195  * spdsock_wput.  Much of the T_CAPABILITY_ACK information is copied from
3196  * spdsock_g_t_info_ack.  The current state of the stream is copied from
3197  * spdsock_state.
3198  */
3199 static void
3200 spdsock_capability_req(queue_t *q, mblk_t *mp)
3201 {
3202 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3203 	t_uscalar_t cap_bits1;
3204 	struct T_capability_ack	*tcap;
3205 
3206 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3207 
3208 	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3209 	    mp->b_datap->db_type, T_CAPABILITY_ACK);
3210 	if (mp == NULL)
3211 		return;
3212 
3213 	tcap = (struct T_capability_ack *)mp->b_rptr;
3214 	tcap->CAP_bits1 = 0;
3215 
3216 	if (cap_bits1 & TC1_INFO) {
3217 		spdsock_copy_info(&tcap->INFO_ack, ss);
3218 		tcap->CAP_bits1 |= TC1_INFO;
3219 	}
3220 
3221 	qreply(q, mp);
3222 }
3223 
3224 /*
3225  * This routine responds to T_INFO_REQ messages. It is called by
3226  * spdsock_wput_other.
3227  * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3228  * The current state of the stream is copied from spdsock_state.
3229  */
3230 static void
3231 spdsock_info_req(q, mp)
3232 	queue_t	*q;
3233 	mblk_t	*mp;
3234 {
3235 	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3236 	    T_INFO_ACK);
3237 	if (mp == NULL)
3238 		return;
3239 	spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3240 	    (spdsock_t *)q->q_ptr);
3241 	qreply(q, mp);
3242 }
3243 
3244 /*
3245  * spdsock_err_ack. This routine creates a
3246  * T_ERROR_ACK message and passes it
3247  * upstream.
3248  */
3249 static void
3250 spdsock_err_ack(q, mp, t_error, sys_error)
3251 	queue_t	*q;
3252 	mblk_t	*mp;
3253 	int	t_error;
3254 	int	sys_error;
3255 {
3256 	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3257 		qreply(q, mp);
3258 }
3259 
3260 /*
3261  * This routine retrieves the current status of socket options.
3262  * It returns the size of the option retrieved.
3263  */
3264 /* ARGSUSED */
3265 int
3266 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3267 {
3268 	int *i1 = (int *)ptr;
3269 
3270 	switch (level) {
3271 	case SOL_SOCKET:
3272 		switch (name) {
3273 		case SO_TYPE:
3274 			*i1 = SOCK_RAW;
3275 			break;
3276 		/*
3277 		 * The following two items can be manipulated,
3278 		 * but changing them should do nothing.
3279 		 */
3280 		case SO_SNDBUF:
3281 			*i1 = (int)q->q_hiwat;
3282 			break;
3283 		case SO_RCVBUF:
3284 			*i1 = (int)(RD(q)->q_hiwat);
3285 			break;
3286 		}
3287 		break;
3288 	default:
3289 		return (0);
3290 	}
3291 	return (sizeof (int));
3292 }
3293 
3294 /*
3295  * This routine sets socket options.
3296  */
3297 /* ARGSUSED */
3298 int
3299 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3300     uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3301     void *thisdg_attrs, cred_t *cr)
3302 {
3303 	int *i1 = (int *)invalp;
3304 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3305 	spd_stack_t	*spds = ss->spdsock_spds;
3306 
3307 	switch (level) {
3308 	case SOL_SOCKET:
3309 		switch (name) {
3310 		case SO_SNDBUF:
3311 			if (*i1 > spds->spds_max_buf)
3312 				return (ENOBUFS);
3313 			q->q_hiwat = *i1;
3314 			break;
3315 		case SO_RCVBUF:
3316 			if (*i1 > spds->spds_max_buf)
3317 				return (ENOBUFS);
3318 			RD(q)->q_hiwat = *i1;
3319 			(void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3320 			break;
3321 		}
3322 		break;
3323 	}
3324 	return (0);
3325 }
3326 
3327 
/*
 * Handle non-PF_POLICY STREAMS messages: TPI primitives, ioctls, and
 * M_FLUSH.  Anything unrecognized is freed (black-holed) at the bottom.
 */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;
	cred_t		*cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		/* A TPI primitive is at least one long; toss runts. */
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
		case T_OPTMGMT_REQ:
			/*
			 * All Solaris components should pass a db_credp
			 * for this TPI message, hence we ASSERT.
			 * But in case there is some other M_PROTO that looks
			 * like a TPI message sent by some other kernel
			 * component, we check and return an error.
			 */
			cr = msg_getcred(mp, NULL);
			ASSERT(cr != NULL);
			if (cr == NULL) {
				spdsock_err_ack(q, mp, TSYSERR, EINVAL);
				return;
			}
			/* Dispatch to the matching optcom flavor. */
			if (((union T_primitives *)mp->b_rptr)->type ==
			    T_SVR4_OPTMGMT_REQ) {
				svr4_optcom_req(q, mp, cr, &spdsock_opt_obj);
			} else {
				tpi_optcom_req(q, mp, cr, &spdsock_opt_obj);
			}
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock; fatal-error the stream. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() built the reply in mp on success. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Reflect the flush back for the read side. */
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}
3423 
/*
 * STREAMS write-side put procedure.  PF_POLICY requests arrive either as
 * raw M_DATA or wrapped in a T_DATA_REQ; both end up in spdsock_parse().
 * Everything else is routed to spdsock_wput_other().
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the T_DATA_REQ header, keep payload. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}
3478 
/*
 * Device open procedure, called when new queue pair created.
 * We are passed the read-side queue.
 */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	/* PF_POLICY sockets are privileged. */
	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	if (sflag & MODOPEN)
		return (EINVAL);

	/*
	 * Find the opener's netstack; netstack_find_by_cred() returns it
	 * held, and the hold is released in spdsock_close() (or below on
	 * an error path).
	 */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	/* Minor numbers come from a vmem arena; 0 means exhausted. */
	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		/* Undo the minor allocation and the netstack hold. */
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	/* Point both halves of the queue pair at our per-instance state. */
	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);

	/* Cloning driver: hand back a unique minor device. */
	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}
3541 
3542 /*
3543  * Read-side service procedure, invoked when we get back-enabled
3544  * when buffer space becomes available.
3545  *
3546  * Dump another chunk if we were dumping before; when we finish, kick
3547  * the write-side queue in case it's waiting for read queue space.
3548  */
3549 void
3550 spdsock_rsrv(queue_t *q)
3551 {
3552 	spdsock_t *ss = q->q_ptr;
3553 
3554 	if (ss->spdsock_dump_req != NULL)
3555 		spdsock_dump_some(q, ss);
3556 
3557 	if (ss->spdsock_dump_req == NULL)
3558 		qenable(OTHERQ(q));
3559 }
3560 
/*
 * Write-side service procedure, invoked when we defer processing
 * if another message is received while a dump is in progress.
 */
void
spdsock_wsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* A dump is still running; let the read side drive progress. */
	if (ss->spdsock_dump_req != NULL) {
		qenable(OTHERQ(q));
		return;
	}

	while ((mp = getq(q)) != NULL) {
		if (ipsec_loaded(ipss)) {
			spdsock_wput(q, mp);
			/* That message may have begun a new dump; stop here. */
			if (ss->spdsock_dump_req != NULL)
				return;
		} else if (!ipsec_failed(ipss)) {
			/* IPsec not loaded yet; leave the message queued. */
			(void) putq(q, mp);
		} else {
			spdsock_error(q, mp, EPFNOSUPPORT, 0);
		}
	}
}
3589 
3590 static int
3591 spdsock_close(queue_t *q)
3592 {
3593 	spdsock_t *ss = q->q_ptr;
3594 	spd_stack_t	*spds = ss->spdsock_spds;
3595 
3596 	qprocsoff(q);
3597 
3598 	/* Safe assumption. */
3599 	ASSERT(ss != NULL);
3600 
3601 	if (ss->spdsock_timeout != 0)
3602 		(void) quntimeout(q, ss->spdsock_timeout);
3603 
3604 	ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));
3605 
3606 	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3607 	netstack_rele(ss->spdsock_spds->spds_netstack);
3608 
3609 	kmem_free(ss, sizeof (spdsock_t));
3610 	return (0);
3611 }
3612 
/*
 * Merge the IPsec algorithms tables with the received algorithm information.
 * Called with spds_alg_lock held; takes ipsec_alg_lock internally for the
 * table swap, and notifies AH/ESP of the change on the way out.
 */
void
spdsock_merge_algs(spd_stack_t *spds)
{
	ipsec_alginfo_t *alg, *oalg;
	ipsec_algtype_t algtype;
	uint_t algidx, algid, nalgs;
	crypto_mech_name_t *mechs;
	uint_t mech_count, mech_idx;
	netstack_t	*ns = spds->spds_netstack;
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/*
	 * Get the list of supported mechanisms from the crypto framework.
	 * If a mechanism is supported by KCF, resolve its mechanism
	 * id and mark it as being valid. This operation must be done
	 * without holding alg_lock, since it can cause a provider
	 * module to be loaded and the provider notification callback to
	 * be invoked.
	 */
	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			int algflags = 0;
			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;

			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			/*
			 * The NULL encryption algorithm is a special
			 * case because there are no mechanisms, yet
			 * the algorithm is still valid.
			 */
			if (alg->alg_id == SADB_EALG_NULL) {
				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
				alg->alg_flags |= ALG_FLAG_VALID;
				continue;
			}

			/* Match the algorithm's mechanism name against KCF. */
			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
				    CRYPTO_MAX_MECH_NAME) == 0) {
					mt = crypto_mech2id(alg->alg_mech_name);
					/* KCF listed it, so it must resolve. */
					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
					algflags = ALG_FLAG_VALID;
					break;
				}
			}
			alg->alg_mech_type = mt;
			alg->alg_flags |= algflags;
		}
	}

	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm currently defined, check if it is
	 * present in the new tables created from the SPD_UPDATEALGS
	 * message received from user-space.
	 * Delete the algorithm entries that are currently defined
	 * but not part of the new tables.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		nalgs = ipss->ipsec_nalgs[algtype];
		for (algidx = 0; algidx < nalgs; algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			if (spds->spds_algs[algtype][algid] == NULL)
				ipsec_alg_unreg(algtype, algid, ns);
		}
	}

	/*
	 * For each algorithm we just received, check if it is
	 * present in the currently defined tables. If it is, swap
	 * the entry with the one we just allocated.
	 * If the new algorithm is not in the current tables,
	 * add it.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
			    NULL) {
				/*
				 * New algorithm, add it to the algorithm
				 * table.
				 */
				ipsec_alg_reg(algtype, alg, ns);
			} else {
				/*
				 * Algorithm is already in the table. Swap
				 * the existing entry with the new one.
				 */
				ipsec_alg_fix_min_max(alg, algtype, ns);
				ipss->ipsec_alglists[algtype][algid] = alg;
				ipsec_alg_free(oalg);
			}
			/* Ownership moved to the IPsec tables; drop ours. */
			spds->spds_algs[algtype][algid] = NULL;
		}
	}

	/* Propagate the per-type execution modes as well. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		ipss->ipsec_algs_exec_mode[algtype] =
		    spds->spds_algs_exec_mode[algtype];
	}

	mutex_exit(&ipss->ipsec_alg_lock);

	crypto_free_mech_list(mechs, mech_count);

	/* Let AH and ESP react to the updated algorithm tables. */
	ipsecah_algs_changed(ns);
	ipsecesp_algs_changed(ns);
}
3735