xref: /illumos-gate/usr/src/uts/common/inet/ipsec_impl.h (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef _INET_IPSEC_IMPL_H
27 #define	_INET_IPSEC_IMPL_H
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 #include <inet/ip.h>
32 #include <inet/ipdrop.h>
33 
34 #ifdef	__cplusplus
35 extern "C" {
36 #endif
37 
38 #define	IPSEC_CONF_SRC_ADDRESS	0	/* Source Address */
39 #define	IPSEC_CONF_SRC_PORT		1	/* Source Port */
40 #define	IPSEC_CONF_DST_ADDRESS	2	/* Dest Address */
41 #define	IPSEC_CONF_DST_PORT		3	/* Dest Port */
42 #define	IPSEC_CONF_SRC_MASK		4	/* Source Address Mask */
43 #define	IPSEC_CONF_DST_MASK		5	/* Destination Address Mask */
44 #define	IPSEC_CONF_ULP			6	/* Upper layer Protocol */
45 #define	IPSEC_CONF_IPSEC_PROT	7	/* AH or ESP or AH_ESP */
46 #define	IPSEC_CONF_IPSEC_AALGS	8	/* Auth Algorithms - MD5 etc. */
47 #define	IPSEC_CONF_IPSEC_EALGS	9	/* Encr Algorithms - DES etc. */
48 #define	IPSEC_CONF_IPSEC_EAALGS	10	/* ESP Auth Algorithms - MD5 etc. */
49 #define	IPSEC_CONF_IPSEC_SA		11	/* Shared or unique SA */
50 #define	IPSEC_CONF_IPSEC_DIR 		12	/* Direction of traffic */
51 #define	IPSEC_CONF_ICMP_TYPE 		13	/* ICMP type */
52 #define	IPSEC_CONF_ICMP_CODE 		14	/* ICMP code */
53 #define	IPSEC_CONF_NEGOTIATE		15	/* Negotiation */
54 #define	IPSEC_CONF_TUNNEL		16	/* Tunnel */
55 
56 /* Type of an entry */
57 
58 #define	IPSEC_NTYPES			0x02
59 #define	IPSEC_TYPE_OUTBOUND		0x00
60 #define	IPSEC_TYPE_INBOUND		0x01
61 
62 /* Policy */
63 #define	IPSEC_POLICY_APPLY	0x01
64 #define	IPSEC_POLICY_DISCARD	0x02
65 #define	IPSEC_POLICY_BYPASS	0x03
66 
67 /* Shared or unique SA */
68 #define	IPSEC_SHARED_SA		0x01
69 #define	IPSEC_UNIQUE_SA		0x02
70 
71 /* IPsec protocols and combinations */
72 #define	IPSEC_AH_ONLY		0x01
73 #define	IPSEC_ESP_ONLY		0x02
74 #define	IPSEC_AH_ESP		0x03
75 
76 /*
77  * Internally defined "any" algorithm.
78  * Move to PF_KEY v3 when that RFC is released.
79  */
80 #define	SADB_AALG_ANY 255
81 
82 #ifdef _KERNEL
83 
84 #include <inet/common.h>
85 #include <netinet/ip6.h>
86 #include <netinet/icmp6.h>
87 #include <net/pfkeyv2.h>
88 #include <inet/ip.h>
89 #include <inet/sadb.h>
90 #include <inet/ipsecah.h>
91 #include <inet/ipsecesp.h>
92 #include <sys/crypto/common.h>
93 #include <sys/crypto/api.h>
94 #include <sys/avl.h>
95 
96 /*
97  * Maximum number of authentication algorithms (they can be indexed by one
98  * byte, per PF_KEY and the IKE IPsec DOI).
99  */
100 #define	MAX_AALGS 256
101 
102 /*
103  * IPsec task queue constants.
104  */
105 #define	IPSEC_TASKQ_MIN 10
106 #define	IPSEC_TASKQ_MAX 20
107 
108 /*
109  * So we can access IPsec global variables that live in keysock.c.
110  */
111 extern boolean_t keysock_extended_reg(netstack_t *);
112 extern uint32_t keysock_next_seq(netstack_t *);
113 
114 /*
115  * Locking for ipsec policy rules:
116  *
117  * policy heads: system policy is static; per-conn polheads are dynamic,
118  * and refcounted (and inherited); use atomic refcounts and "don't let
119  * go with both hands".
120  *
121  * policy: refcounted; references from polhead, ipsec_out
122  *
123  * actions: refcounted; referenced from: action hash table, policy, ipsec_out
124  * selectors: refcounted; referenced from: selector hash table, policy.
125  */
126 
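/*
 * A minimal sketch (not taken from the code) of the "don't let go with
 * both hands" idiom mentioned above: when replacing a refheld pointer,
 * e.g. a conn_t's policy head, take the new reference before dropping
 * the old one so the count never transiently reaches zero:
 *
 *	IPPH_REFHOLD(newhead);
 *	oldhead = connp->conn_policy;
 *	connp->conn_policy = newhead;
 *	IPPH_REFRELE(oldhead, ns);
 */
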
127 /*
128  * the following are inspired by, but not directly based on,
129  * some of the sys/queue.h type-safe pseudo-polymorphic macros
130  * found in BSD.
131  *
132  * XXX If we use these more generally, we'll have to make the names
133  * less generic (HASH_* will probably clobber other namespaces).
134  */
135 
136 #define	HASH_LOCK(table, hash) \
137 	mutex_enter(&(table)[hash].hash_lock)
138 #define	HASH_UNLOCK(table, hash) \
139 	mutex_exit(&(table)[hash].hash_lock)
140 
141 #define	HASH_LOCKED(table, hash) \
142 	MUTEX_HELD(&(table)[hash].hash_lock)
143 
144 #define	HASH_ITERATE(var, field, table, hash) 		\
145 	var = table[hash].hash_head; var != NULL; var = var->field.hash_next
146 
147 #define	HASH_NEXT(var, field) 		\
148 	(var)->field.hash_next
149 
150 #define	HASH_INSERT(var, field, table, hash)			\
151 {								\
152 	ASSERT(HASH_LOCKED(table, hash));			\
153 	(var)->field.hash_next = (table)[hash].hash_head;	\
154 	(var)->field.hash_pp = &(table)[hash].hash_head;	\
155 	(table)[hash].hash_head = var;				\
156 	if ((var)->field.hash_next != NULL)			\
157 		(var)->field.hash_next->field.hash_pp = 	\
158 			&((var)->field.hash_next); 		\
159 }
160 
161 
162 #define	HASH_UNCHAIN(var, field, table, hash)			\
163 {								\
164 	ASSERT(MUTEX_HELD(&(table)[hash].hash_lock));		\
165 	HASHLIST_UNCHAIN(var, field);				\
166 }
167 
168 #define	HASHLIST_INSERT(var, field, head)			\
169 {								\
170 	(var)->field.hash_next = head;				\
171 	(var)->field.hash_pp = &(head);				\
172 	head = var;						\
173 	if ((var)->field.hash_next != NULL)			\
174 		(var)->field.hash_next->field.hash_pp = 	\
175 			&((var)->field.hash_next); 		\
176 }
177 
178 #define	HASHLIST_UNCHAIN(var, field) 				\
179 {								\
180 	*var->field.hash_pp = var->field.hash_next;		\
181 	if (var->field.hash_next)				\
182 		var->field.hash_next->field.hash_pp = 		\
183 			var->field.hash_pp;			\
184 	HASH_NULL(var, field);					\
185 }
186 
187 
188 #define	HASH_NULL(var, field) 					\
189 {								\
190 	var->field.hash_next = NULL;				\
191 	var->field.hash_pp = NULL;				\
192 }
193 
194 #define	HASH_LINK(fieldname, type)				\
195 	struct {						\
196 		type *hash_next;				\
197 		type **hash_pp;					\
198 	} fieldname
199 
200 
201 #define	HASH_HEAD(tag)						\
202 	struct {						\
203 		struct tag *hash_head;				\
204 		kmutex_t hash_lock;				\
205 	}
206 
207 
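/*
 * A hedged usage sketch of the HASH_* macros above.  The foo_t type,
 * foo_table, and FOO_HASH_SIZE are invented here for illustration and
 * do not exist elsewhere in the code.
 *
 *	#define	FOO_HASH_SIZE	64
 *
 *	typedef struct foo_s {
 *		HASH_LINK(foo_hash, struct foo_s);
 *		uint32_t foo_val;
 *	} foo_t;
 *
 *	static HASH_HEAD(foo_s) foo_table[FOO_HASH_SIZE];
 *
 *	static foo_t *
 *	foo_lookup(uint32_t hval, uint32_t val)
 *	{
 *		foo_t *foo;
 *
 *		HASH_LOCK(foo_table, hval);
 *		for (HASH_ITERATE(foo, foo_hash, foo_table, hval)) {
 *			if (foo->foo_val == val)
 *				break;
 *		}
 *		HASH_UNLOCK(foo_table, hval);
 *		return (foo);
 *	}
 *
 * Insertion is symmetric: HASH_LOCK(), then HASH_INSERT(foo, foo_hash,
 * foo_table, hval), then HASH_UNLOCK().  A refcounted object (like the
 * real users below) would be REFHELD before the bucket lock is dropped.
 */
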
208 typedef struct ipsec_policy_s ipsec_policy_t;
209 
210 typedef HASH_HEAD(ipsec_policy_s) ipsec_policy_hash_t;
211 
212 /*
213  * When adding new fields to ipsec_prot_t, make sure to update
214  * ipsec_in_to_out_action() as well as other code in spd.c
215  */
216 
217 typedef struct ipsec_prot
218 {
219 	unsigned int
220 		ipp_use_ah : 1,
221 		ipp_use_esp : 1,
222 		ipp_use_se : 1,
223 		ipp_use_unique : 1,
224 		ipp_use_espa : 1,
225 		ipp_pad : 27;
226 	uint8_t		ipp_auth_alg;		 /* DOI number */
227 	uint8_t		ipp_encr_alg;		 /* DOI number */
228 	uint8_t		ipp_esp_auth_alg;	 /* DOI number */
229 	uint16_t 	ipp_ah_minbits;		 /* AH: min keylen */
230 	uint16_t 	ipp_ah_maxbits;		 /* AH: max keylen */
231 	uint16_t	ipp_espe_minbits;	 /* ESP encr: min keylen */
232 	uint16_t	ipp_espe_maxbits;	 /* ESP encr: max keylen */
233 	uint16_t	ipp_espa_minbits;	 /* ESP auth: min keylen */
234 	uint16_t	ipp_espa_maxbits;	 /* ESP auth: max keylen */
235 	uint32_t	ipp_km_proto;		 /* key mgmt protocol */
236 	uint32_t	ipp_km_cookie;		 /* key mgmt cookie */
237 	uint32_t	ipp_replay_depth;	 /* replay window */
238 	/* XXX add lifetimes */
239 } ipsec_prot_t;
240 
241 #define	IPSEC_MAX_KEYBITS (0xffff)
242 
243 /*
244  * An individual policy action, possibly a member of a chain.
245  *
246  * Action chains may be shared between multiple policy rules.
247  *
248  * With one exception (IPSEC_POLICY_LOG), a chain consists of an
249  * ordered list of alternative ways to handle a packet.
250  *
251  * All actions are also "interned" into a hash table (to allow
252  * multiple rules with the same action chain to share one copy in
253  * memory).
254  */
255 
256 typedef struct ipsec_act
257 {
258 	uint8_t		ipa_type;
259 	uint8_t		ipa_log;
260 	union
261 	{
262 		ipsec_prot_t	ipau_apply;
263 		uint8_t		ipau_reject_type;
264 		uint32_t	ipau_resolve_id; /* magic cookie */
265 		uint8_t		ipau_log_type;
266 	} ipa_u;
267 #define	ipa_apply ipa_u.ipau_apply
268 #define	ipa_reject_type ipa_u.ipau_reject_type
269 #define	ipa_log_type ipa_u.ipau_log_type
270 #define	ipa_resolve_id ipa_u.ipau_resolve_id
271 } ipsec_act_t;
272 
273 #define	IPSEC_ACT_APPLY		0x01 /* match IPSEC_POLICY_APPLY */
274 #define	IPSEC_ACT_DISCARD	0x02 /* match IPSEC_POLICY_DISCARD */
275 #define	IPSEC_ACT_BYPASS	0x03 /* match IPSEC_POLICY_BYPASS */
276 #define	IPSEC_ACT_REJECT	0x04
277 #define	IPSEC_ACT_CLEAR		0x05
278 
279 typedef struct ipsec_action_s
280 {
281 	HASH_LINK(ipa_hash, struct ipsec_action_s);
282 	struct ipsec_action_s	*ipa_next;	/* next alternative */
283 	uint32_t		ipa_refs;		/* refcount */
284 	ipsec_act_t		ipa_act;
285 	/*
286 	 * The following bits are equivalent to an OR of bits included in the
287 	 * ipau_apply fields of this and subsequent actions in an
288 	 * action chain; this is an optimization for the sake of
289 	 * ipsec_out_process() in ip.c and a few other places.
290 	 */
291 	unsigned int
292 		ipa_hval: 8,
293 		ipa_allow_clear:1,		/* rule allows cleartext? */
294 		ipa_want_ah:1,			/* an action wants ah */
295 		ipa_want_esp:1,			/* an action wants esp */
296 		ipa_want_se:1,			/* an action wants se */
297 		ipa_want_unique:1,		/* want unique sa's */
298 		ipa_pad:19;
299 	uint32_t		ipa_ovhd;	/* per-packet encap ovhd */
300 } ipsec_action_t;
301 
302 #define	IPACT_REFHOLD(ipa) {			\
303 	atomic_add_32(&(ipa)->ipa_refs, 1);	\
304 	ASSERT((ipa)->ipa_refs != 0);	\
305 }
306 #define	IPACT_REFRELE(ipa) {					\
307 	ASSERT((ipa)->ipa_refs != 0);				\
308 	membar_exit();						\
309 	if (atomic_add_32_nv(&(ipa)->ipa_refs, -1) == 0)	\
310 		ipsec_action_free(ipa);				\
311 	(ipa) = 0;						\
312 }
313 
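/*
 * Hold/release sketch (illustration only; "ap" and "hval" are assumed
 * locals): take a reference while the action is still visible, e.g.
 * under its hash bucket lock, and drop it when done.  Note that
 * IPACT_REFRELE() may free the action and always NULLs the pointer.
 *
 *	IPACT_REFHOLD(ap);
 *	HASH_UNLOCK(ipss->ipsec_action_hash, hval);
 *	... use ap ...
 *	IPACT_REFRELE(ap);
 */
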
314 /*
315  * For now, use a trivially sized hash table for actions.
316  * In the future we can add the structure canonicalization necessary
317  * to get the hash function to behave correctly.
318  */
319 #define	IPSEC_ACTION_HASH_SIZE 1
320 
321 /*
322  * Merged address structure, for cheezy address-family independent
323  * matches in policy code.
324  */
325 
326 typedef union ipsec_addr
327 {
328 	in6_addr_t	ipsad_v6;
329 	in_addr_t	ipsad_v4;
330 } ipsec_addr_t;
331 
332 /*
333  * ipsec selector set, as used by the kernel policy structures.
334  * Note that we specify "local" and "remote"
335  * rather than "source" and "destination", which allows the selectors
336  * for symmetric policy rules to be shared between inbound and
337  * outbound rules.
338  *
339  * "local" means "destination" on inbound, and "source" on outbound.
340  * "remote" means "source" on inbound, and "destination" on outbound.
341  * XXX if we add a fifth policy enforcement point for forwarded packets,
342  * what do we do?
343  *
344  * The ipsl_valid mask is not done as a bitfield; this is so we
345  * can use "ffs()" to find the "most interesting" valid tag.
346  *
347  * XXX should we have multiple types for space-conservation reasons?
348  * (v4 vs v6?  prefix vs. range)?
349  */
350 
351 typedef struct ipsec_selkey
352 {
353 	uint32_t	ipsl_valid;		/* bitmask of valid entries */
354 #define	IPSL_REMOTE_ADDR		0x00000001
355 #define	IPSL_LOCAL_ADDR			0x00000002
356 #define	IPSL_REMOTE_PORT		0x00000004
357 #define	IPSL_LOCAL_PORT			0x00000008
358 #define	IPSL_PROTOCOL			0x00000010
359 #define	IPSL_ICMP_TYPE			0x00000020
360 #define	IPSL_ICMP_CODE			0x00000040
361 #define	IPSL_IPV6			0x00000080
362 #define	IPSL_IPV4			0x00000100
363 
364 #define	IPSL_WILDCARD			0x0000007f
365 
366 	ipsec_addr_t	ipsl_local;
367 	ipsec_addr_t	ipsl_remote;
368 	uint16_t	ipsl_lport;
369 	uint16_t	ipsl_rport;
370 	/*
371 	 * ICMP type and code selectors.  Each has an "_end" value to
372 	 * specify a range; for a single value, the base field and its
373 	 * "_end" counterpart are equal.
374 	 */
375 	uint8_t		ipsl_icmp_type;
376 	uint8_t		ipsl_icmp_type_end;
377 	uint8_t		ipsl_icmp_code;
378 	uint8_t		ipsl_icmp_code_end;
379 
380 	uint8_t		ipsl_proto;		/* ip payload type */
381 	uint8_t		ipsl_local_pfxlen;	/* #bits of prefix */
382 	uint8_t		ipsl_remote_pfxlen;	/* #bits of prefix */
383 	uint8_t		ipsl_mbz;
384 
385 	/* Insert new elements above this line */
386 	uint32_t	ipsl_pol_hval;
387 	uint32_t	ipsl_sel_hval;
388 } ipsec_selkey_t;
389 
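/*
 * Illustrative sketch (not part of the code): a selector key matching
 * IPv4 TCP traffic to local port 23, whichever direction it flows in.
 * The port is assumed to be kept in network byte order.
 *
 *	ipsec_selkey_t sk;
 *
 *	bzero(&sk, sizeof (sk));
 *	sk.ipsl_valid = IPSL_IPV4 | IPSL_PROTOCOL | IPSL_LOCAL_PORT;
 *	sk.ipsl_proto = IPPROTO_TCP;
 *	sk.ipsl_lport = htons(23);
 *
 * Because the key is expressed in "local"/"remote" terms, the same
 * selector can back both the inbound and the outbound rule.
 */
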
390 typedef struct ipsec_sel
391 {
392 	HASH_LINK(ipsl_hash, struct ipsec_sel);
393 	uint32_t	ipsl_refs;		/* # refs to this sel */
394 	ipsec_selkey_t	ipsl_key;		/* actual selector guts */
395 } ipsec_sel_t;
396 
397 /*
398  * One policy rule.  This will be linked into a single hash chain bucket of
399  * the parent policy head.  If the selector is simple enough to
400  * allow hashing, it gets filed under ipsec_policy_root_t->ipr_hash.
401  * Otherwise it goes onto a linked list in ipsec_policy_root_t->ipr_nonhash[af].
402  *
403  * In addition, we file the rule into an avl tree keyed by the rule index.
404  * (Duplicate rules are permitted; the comparison function breaks ties).
405  */
406 struct ipsec_policy_s
407 {
408 	HASH_LINK(ipsp_hash, struct ipsec_policy_s);
409 	avl_node_t		ipsp_byid;
410 	uint64_t		ipsp_index;	/* unique id */
411 	uint32_t		ipsp_prio; 	/* rule priority */
412 	uint32_t		ipsp_refs;
413 	ipsec_sel_t		*ipsp_sel;	/* selector set (shared) */
414 	ipsec_action_t		*ipsp_act; 	/* action (may be shared) */
415 };
416 
417 #define	IPPOL_REFHOLD(ipp) {			\
418 	atomic_add_32(&(ipp)->ipsp_refs, 1);	\
419 	ASSERT((ipp)->ipsp_refs != 0);		\
420 }
421 #define	IPPOL_REFRELE(ipp, ns) {				\
422 	ASSERT((ipp)->ipsp_refs != 0);				\
423 	membar_exit();						\
424 	if (atomic_add_32_nv(&(ipp)->ipsp_refs, -1) == 0)	\
425 		ipsec_policy_free(ipp, ns);			\
426 	(ipp) = 0;						\
427 }
428 
429 #define	IPPOL_UNCHAIN(php, ip, ns)					\
430 	HASHLIST_UNCHAIN((ip), ipsp_hash);				\
431 	avl_remove(&(php)->iph_rulebyid, (ip));				\
432 	IPPOL_REFRELE(ip, ns);
433 
434 /*
435  * Policy ruleset.  One per (protocol * direction) for system policy.
436  */
437 
438 #define	IPSEC_AF_V4	0
439 #define	IPSEC_AF_V6	1
440 #define	IPSEC_NAF	2
441 
442 typedef struct ipsec_policy_root_s
443 {
444 	ipsec_policy_t		*ipr_nonhash[IPSEC_NAF];
445 	int			ipr_nchains;
446 	ipsec_policy_hash_t 	*ipr_hash;
447 } ipsec_policy_root_t;
448 
449 /*
450  * Policy head.  One for system policy; there may also be one present
451  * on ill_t's with interface-specific policy, as well as one present
452  * for sockets with per-socket policy allocated.
453  */
454 
455 typedef struct ipsec_policy_head_s
456 {
457 	uint32_t	iph_refs;
458 	krwlock_t	iph_lock;
459 	uint64_t	iph_gen; /* generation number */
460 	ipsec_policy_root_t iph_root[IPSEC_NTYPES];
461 	avl_tree_t	iph_rulebyid;
462 } ipsec_policy_head_t;
463 
464 #define	IPPH_REFHOLD(iph) {			\
465 	atomic_add_32(&(iph)->iph_refs, 1);	\
466 	ASSERT((iph)->iph_refs != 0);		\
467 }
468 #define	IPPH_REFRELE(iph, ns) {					\
469 	ASSERT((iph)->iph_refs != 0);				\
470 	membar_exit();						\
471 	if (atomic_add_32_nv(&(iph)->iph_refs, -1) == 0)	\
472 		ipsec_polhead_free(iph, ns);			\
473 	(iph) = 0;						\
474 }
475 
476 /*
477  * IPsec fragment related structures
478  */
479 
480 typedef struct ipsec_fragcache_entry {
481 	struct ipsec_fragcache_entry *itpfe_next;	/* hash list chain */
482 	mblk_t *itpfe_fraglist;			/* list of fragments */
483 	time_t itpfe_exp;			/* time when entry is stale */
484 	int itpfe_depth;			/* # of fragments in list */
485 	ipsec_addr_t itpfe_frag_src;
486 	ipsec_addr_t itpfe_frag_dst;
487 #define	itpfe_src itpfe_frag_src.ipsad_v4
488 #define	itpfe_src6 itpfe_frag_src.ipsad_v6
489 #define	itpfe_dst itpfe_frag_dst.ipsad_v4
490 #define	itpfe_dst6 itpfe_frag_dst.ipsad_v6
491 	uint32_t itpfe_id;			/* IP datagram ID */
492 	uint8_t itpfe_proto;			/* IP Protocol */
493 	uint8_t itpfe_last;			/* Last packet */
494 } ipsec_fragcache_entry_t;
495 
496 typedef struct ipsec_fragcache {
497 	kmutex_t itpf_lock;
498 	struct ipsec_fragcache_entry **itpf_ptr;
499 	struct ipsec_fragcache_entry *itpf_freelist;
500 	time_t itpf_expire_hint;	/* time when oldest entry is stale */
501 } ipsec_fragcache_t;
502 
503 /*
504  * Tunnel policies.  We keep a miniature of the transport-mode/global policy
505  * for each tunnel instance.
506  *
507  * People who need both an itp held down AND one of its polheads need to
508  * first lock the itp, THEN the polhead, otherwise deadlock WILL occur.
509  */
510 typedef struct ipsec_tun_pol_s {
511 	avl_node_t itp_node;
512 	kmutex_t itp_lock;
513 	uint64_t itp_next_policy_index;
514 	ipsec_policy_head_t *itp_policy;
515 	ipsec_policy_head_t *itp_inactive;
516 	uint32_t itp_flags;
517 	uint32_t itp_refcnt;
518 	char itp_name[LIFNAMSIZ];
519 	ipsec_fragcache_t itp_fragcache;
520 } ipsec_tun_pol_t;
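
/*
 * Lock-ordering sketch (illustration only), following the rule above:
 * lock the itp first, then the polhead's rwlock.
 *
 *	mutex_enter(&itp->itp_lock);
 *	rw_enter(&itp->itp_policy->iph_lock, RW_WRITER);
 *	... examine or update the active polhead ...
 *	rw_exit(&itp->itp_policy->iph_lock);
 *	mutex_exit(&itp->itp_lock);
 */
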
521 /* NOTE - Callers (tun code) synchronize their own instances for these flags. */
522 #define	ITPF_P_ACTIVE 0x1	/* Are we using IPsec right now? */
523 #define	ITPF_P_TUNNEL 0x2	/* Negotiate tunnel-mode */
524 /* Optimization -> Do we have per-port security entries in this polhead? */
525 #define	ITPF_P_PER_PORT_SECURITY 0x4
526 #define	ITPF_PFLAGS 0x7
527 #define	ITPF_SHIFT 3
528 
529 #define	ITPF_I_ACTIVE 0x8	/* Is the inactive using IPsec right now? */
530 #define	ITPF_I_TUNNEL 0x10	/* Negotiate tunnel-mode (on inactive) */
531 /* Optimization -> Do we have per-port security entries in this polhead? */
532 #define	ITPF_I_PER_PORT_SECURITY 0x20
533 #define	ITPF_IFLAGS 0x38
534 
535 /* NOTE:  f must be an lvalue, not an expression. */
536 #define	ITPF_CLONE(f) (f) = (((f) & ITPF_PFLAGS) | \
537 	    (((f) & ITPF_PFLAGS) << ITPF_SHIFT));
538 #define	ITPF_SWAP(f) (f) = ((((f) & ITPF_PFLAGS) << ITPF_SHIFT) | \
539 	    (((f) & ITPF_IFLAGS) >> ITPF_SHIFT))
540 
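/*
 * Worked example (illustration): if f == (ITPF_P_ACTIVE | ITPF_P_TUNNEL),
 * i.e. 0x3, ITPF_CLONE(f) leaves f == 0x1b (the active-polhead bits
 * copied into the inactive half).  A subsequent ITPF_SWAP(f) exchanges
 * the two halves, which is a no-op here since both halves now match.
 */
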
541 #define	ITP_P_ISACTIVE(itp, iph) ((itp)->itp_flags & \
542 	(((itp)->itp_policy == (iph)) ? ITPF_P_ACTIVE : ITPF_I_ACTIVE))
543 
544 #define	ITP_P_ISTUNNEL(itp, iph) ((itp)->itp_flags & \
545 	(((itp)->itp_policy == (iph)) ? ITPF_P_TUNNEL : ITPF_I_TUNNEL))
546 
547 #define	ITP_P_ISPERPORT(itp, iph) ((itp)->itp_flags & \
548 	(((itp)->itp_policy == (iph)) ? ITPF_P_PER_PORT_SECURITY : \
549 	ITPF_I_PER_PORT_SECURITY))
550 
551 #define	ITP_REFHOLD(itp) { \
552 	atomic_add_32(&((itp)->itp_refcnt), 1);	\
553 	ASSERT((itp)->itp_refcnt != 0); \
554 }
555 
556 #define	ITP_REFRELE(itp, ns) { \
557 	ASSERT((itp)->itp_refcnt != 0); \
558 	membar_exit(); \
559 	if (atomic_add_32_nv(&((itp)->itp_refcnt), -1) == 0) \
560 		itp_free(itp, ns); \
561 }
562 
563 /*
564  * Certificate identity.
565  */
566 
567 typedef struct ipsid_s
568 {
569 	struct ipsid_s *ipsid_next;
570 	struct ipsid_s **ipsid_ptpn;
571 	uint32_t	ipsid_refcnt;
572 	int		ipsid_type;	/* id type */
573 	char 		*ipsid_cid;	/* certificate id string */
574 } ipsid_t;
575 
576 /*
577  * ipsid_t reference hold/release macros, just like ipsa versions.
578  */
579 
580 #define	IPSID_REFHOLD(ipsid) {			\
581 	atomic_add_32(&(ipsid)->ipsid_refcnt, 1);	\
582 	ASSERT((ipsid)->ipsid_refcnt != 0);	\
583 }
584 
585 /*
586  * Decrement the reference count on the ID.  Someone else will clean up
587  * after us later.
588  */
589 
590 #define	IPSID_REFRELE(ipsid) {					\
591 	membar_exit();						\
592 	atomic_add_32(&(ipsid)->ipsid_refcnt, -1);		\
593 }
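
/*
 * Interning sketch (illustration only; the identity type and string are
 * made up, and ipsid_lookup() is assumed to return a held entry):
 *
 *	ipsid_t *id;
 *
 *	id = ipsid_lookup(SADB_IDENTTYPE_FQDN, "gw.example.com", ns);
 *	... latch it or compare it with ipsid_equal() ...
 *	IPSID_REFRELE(id);
 */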
594 
595 struct ipsec_out_s;
596 
597 /*
598  * The following are estimates of the maximum AH and ESP header sizes.
599  * They are used to tell the upper layer the right MSS value to use
600  * without consulting AH/ESP.  If the actual size differs, the ULP will
601  * learn the right one through locally generated
602  * ICMP_FRAGMENTATION_NEEDED messages.
603  *
604  * AH : 12 bytes of constant header + 32 bytes of ICV checksum (SHA-512).
605  */
606 #define	IPSEC_MAX_AH_HDR_SIZE   (44)
607 
608 /*
609  * ESP : Is a bit more complex...
610  *
611  * A system of one inequality and one equation MUST be solved for proper ESP
612  * overhead.  The inequality is:
613  *
614  *    MTU - sizeof (IP header + options) >=
615  *		sizeof (esph_t) + sizeof (IV or ctr) + data-size + 2 + ICV
616  *
617  * IV or counter is almost always the cipher's block size.  The equation is:
618  *
619  *    data-size % block-size = (block-size - 2)
620  *
621  * so we can put as much data into the datagram as possible.  If we are
622  * pessimistic and include our largest overhead cipher (AES) and hash
623  * (SHA-512), and assume 1500-byte MTU minus IPv4 overhead of 20 bytes, we get:
624  *
625  *    1480 >= 8 + 16 + data-size + 2 + 32
626  *    1480 >= 58 + data-size
627  *    1422 >= data-size,      1422 % 16 = 14, so 58 is the overhead!
628  *
629  * But, let's re-run the numbers with the same algorithms, but with an IPv6
630  * header:
631  *
632  *    1460 >= 58 + data-size
633  *    1402 >= data-size,     1402 % 16 = 10, meaning shrink to 1390 to get 14,
634  *
635  * which means the overhead is now 70.
636  *
637  * Hmmm... IPv4 headers can never be anything other than multiples of 4-bytes,
638  * and IPv6 ones can never be anything other than multiples of 8-bytes.  We've
639  * seen overheads of 58 and 70.  58 % 16 == 10, and 70 % 16 == 6.  IPv4 could
640  * force us to have 62 ( % 16 == 14) or 66 ( % 16 == 2), or IPv6 could force us
641  * to have 78 ( % 16 = 14).  Let's compute IPv6 + 8-bytes of options:
642  *
643  *    1452 >= 58 + data-size
644  *    1394 >= data-size,     1394 % 16 = 2, meaning shrink to 1390 to get 14,
645  *
646  * Aha!  The "ESP overhead" shrinks to 62 (70 - 8).  This is good.  Let's try
647  * IPv4 + 8 bytes of IPv4 options:
648  *
649  *    1472 >= 58 + data-size
650  *    1414 >= data-size,      1414 % 16 = 6, meaning shrink to 1406,
651  *
652  * meaning 66 is the overhead.  Let's try 12 bytes:
653  *
654  *    1468 >= 58 + data-size
655  *    1410 >= data-size,      1410 % 16 = 2, meaning also shrink to 1406,
656  *
657  * meaning 62 is the overhead.  How about 16 bytes?
658  *
659  *    1464 >= 58 + data-size
660  *    1406 >= data-size,      1406 % 16 = 14, which is great!
661  *
662  * this means 58 is the overhead.  If I wrap and add 20 bytes, it looks just
663  * like IPv6's 70 bytes.  If I add 24, we go back to 66 bytes.
664  *
665  * So picking 70 is a sensible, conservative default.  Optimal calculations
666  * will depend on knowing pre-ESP header length (called "divpoint" in the ESP
667  * code), which could be cached in the conn_t for connected endpoints, or
668  * which must be computed on every datagram otherwise.
669  */
670 #define	IPSEC_MAX_ESP_HDR_SIZE  (70)
671 
672 /*
673  * Alternate, when we know the crypto block size via the SA.  Assume an ICV on
674  * the SA.  Use:
675  *
676  * sizeof (esph_t) + 2 * (sizeof (IV/counter)) - 2 + sizeof (ICV).  The "-2"
677  * discounts the overhead of the pad + padlen that gets swallowed up by the
678  * second (theoretically all-pad) cipher-block.  If you use our examples of
679  * AES and SHA512, you get:
680  *
681  *    8 + 32 - 2 + 32 == 70.
682  *
683  * Which is our pre-computed maximum above.
684  */
685 #include <inet/ipsecesp.h>
686 #define	IPSEC_BASE_ESP_HDR_SIZE(sa) \
687 	(sizeof (esph_t) + ((sa)->ipsa_iv_len << 1) - 2 + (sa)->ipsa_mac_len)
688 
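/*
 * A hedged sketch (not used by the code) of the arithmetic described
 * above: given the usable MTU (link MTU minus the IP header and
 * options), the cipher block size, and the ICV length, the largest ESP
 * payload satisfying data % block == block - 2 is:
 *
 *	static uint32_t
 *	esp_max_payload(uint32_t usable_mtu, uint32_t blocksz,
 *	    uint32_t icvlen)
 *	{
 *		uint32_t data;
 *
 *		data = usable_mtu - (sizeof (esph_t) + blocksz + 2 + icvlen);
 *		data -= (data + 2) % blocksz;
 *		return (data);
 *	}
 *
 * With usable_mtu = 1480 (IPv4, no options), blocksz = 16 (AES), and
 * icvlen = 32 (SHA-512) this returns 1422, an overhead of 58; with
 * usable_mtu = 1460 (IPv6) it returns 1390, an overhead of 70, matching
 * the numbers worked out above.
 */
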
689 /*
690  * Identity hash table.
691  *
692  * Identities are refcounted and "interned" into the hash table.
693  * Only references coming from other objects (SA's, latching state)
694  * are counted in ipsid_refcnt.
695  *
696  * Locking: IPSID_REFHOLD is safe only when (a) the object's hash bucket
697  * is locked, or (b) we know that the refcount must be > 0.
698  *
699  * The ipsid_next and ipsid_ptpn fields are only to be referenced or
700  * modified when the bucket lock is held; in particular, we only
701  * delete objects while holding the bucket lock, and we only increase
702  * the refcount from 0 to 1 while the bucket lock is held.
703  */
704 
705 #define	IPSID_HASHSIZE 64
706 
707 typedef struct ipsif_s
708 {
709 	ipsid_t *ipsif_head;
710 	kmutex_t ipsif_lock;
711 } ipsif_t;
712 
713 
714 /*
715  * IPsec stack instances
716  */
717 struct ipsec_stack {
718 	netstack_t		*ipsec_netstack;	/* Common netstack */
719 
720 	/* Packet dropper for IP IPsec processing failures */
721 	ipdropper_t		ipsec_dropper;
722 
723 /* From spd.c */
724 	/*
725 	 * Policy rule index generator.  We assume this won't wrap in the
726 	 * lifetime of a system.  If we make 2^20 policy changes per second,
727 	 * this will last 2^44 seconds, or roughly 500,000 years, so we don't
728 	 * have to worry about reusing policy index values.
729 	 */
730 	uint64_t		ipsec_next_policy_index;
731 
732 	HASH_HEAD(ipsec_action_s) ipsec_action_hash[IPSEC_ACTION_HASH_SIZE];
733 	HASH_HEAD(ipsec_sel)	  *ipsec_sel_hash;
734 	uint32_t		ipsec_spd_hashsize;
735 
736 	ipsif_t			ipsec_ipsid_buckets[IPSID_HASHSIZE];
737 
738 	/*
739 	 * Active & Inactive system policy roots
740 	 */
741 	ipsec_policy_head_t	ipsec_system_policy;
742 	ipsec_policy_head_t	ipsec_inactive_policy;
743 
744 	/* Packet dropper for generic SPD drops. */
745 	ipdropper_t		ipsec_spd_dropper;
746 	krwlock_t		ipsec_itp_get_byaddr_rw_lock;
747 	ipsec_tun_pol_t		*(*ipsec_itp_get_byaddr)
748 	    (uint32_t *, uint32_t *, int, netstack_t *);
749 
750 /* ipdrop.c */
751 	kstat_t			*ipsec_ip_drop_kstat;
752 	struct ip_dropstats	*ipsec_ip_drop_types;
753 
754 /* spd.c */
755 	/*
756 	 * Have a counter for every possible policy message in
757 	 * ipsec_policy_failure_msgs
758 	 */
759 	uint32_t		ipsec_policy_failure_count[IPSEC_POLICY_MAX];
760 	/* Time of the last IPsec policy failure that printed a message. */
761 	hrtime_t		ipsec_policy_failure_last;
762 
763 /* ip_spd.c */
764 	/* stats */
765 	kstat_t			*ipsec_ksp;
766 	struct ipsec_kstats_s	*ipsec_kstats;
767 
768 /* sadb.c */
769 	/* Packet dropper for generic SADB drops. */
770 	ipdropper_t		ipsec_sadb_dropper;
771 
772 /* spd.c */
773 	boolean_t		ipsec_inbound_v4_policy_present;
774 	boolean_t		ipsec_outbound_v4_policy_present;
775 	boolean_t		ipsec_inbound_v6_policy_present;
776 	boolean_t		ipsec_outbound_v6_policy_present;
777 
778 /* spd.c */
779 	/*
780 	 * Because policy needs to know what algorithms are supported, keep the
781 	 * lists of algorithms here.
782 	 */
783 	kmutex_t 		ipsec_alg_lock;
784 
785 	uint8_t			ipsec_nalgs[IPSEC_NALGTYPES];
786 	ipsec_alginfo_t	*ipsec_alglists[IPSEC_NALGTYPES][IPSEC_MAX_ALGS];
787 
788 	uint8_t		ipsec_sortlist[IPSEC_NALGTYPES][IPSEC_MAX_ALGS];
789 
790 	int		ipsec_algs_exec_mode[IPSEC_NALGTYPES];
791 
792 	uint32_t 	ipsec_tun_spd_hashsize;
793 	/*
794 	 * Tunnel policies - AVL tree indexed by tunnel name.
795 	 */
796 	krwlock_t 	ipsec_tunnel_policy_lock;
797 	uint64_t	ipsec_tunnel_policy_gen;
798 	avl_tree_t	ipsec_tunnel_policies;
799 
800 /* ipsec_loader.c */
801 	kmutex_t	ipsec_loader_lock;
802 	int		ipsec_loader_state;
803 	int		ipsec_loader_sig;
804 	kt_did_t	ipsec_loader_tid;
805 	kcondvar_t	ipsec_loader_sig_cv;	/* For loader_sig conditions. */
806 
807 };
808 typedef struct ipsec_stack ipsec_stack_t;
809 
810 /* Handle the kstat_create in ip_drop_init() failing */
811 #define	DROPPER(_ipss, _dropper) \
812 	(((_ipss)->ipsec_ip_drop_types == NULL) ? NULL : \
813 	&((_ipss)->ipsec_ip_drop_types->_dropper))
814 
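/*
 * Usage sketch (illustration only; the counter name is assumed to be
 * one of the struct ip_dropstats members, and ip_drop_packet() is
 * assumed to take (mp, inbound, ill, ire, counter, dropper)):
 *
 *	ip_drop_packet(mp, B_TRUE, NULL, NULL,
 *	    DROPPER(ipss, ipds_spd_nomem), &ipss->ipsec_spd_dropper);
 *
 * DROPPER() evaluates to NULL when the kstat_create() in ip_drop_init()
 * failed, per the comment above.
 */
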
815 /*
816  * Loader states.
817  */
818 #define	IPSEC_LOADER_WAIT	0
819 #define	IPSEC_LOADER_FAILED	-1
820 #define	IPSEC_LOADER_SUCCEEDED	1
821 
822 /*
823  * ipsec_loader entrypoints.
824  */
825 extern void ipsec_loader_init(ipsec_stack_t *);
826 extern void ipsec_loader_start(ipsec_stack_t *);
827 extern void ipsec_loader_destroy(ipsec_stack_t *);
828 extern void ipsec_loader_loadnow(ipsec_stack_t *);
829 extern boolean_t ipsec_loader_wait(queue_t *q, ipsec_stack_t *);
830 extern boolean_t ipsec_loaded(ipsec_stack_t *);
831 extern boolean_t ipsec_failed(ipsec_stack_t *);
832 
833 /*
834  * callback from ipsec_loader to ip
835  */
836 extern void ip_ipsec_load_complete(ipsec_stack_t *);
837 
838 /*
839  * ipsec policy entrypoints (spd.c)
840  */
841 
842 extern void ipsec_policy_g_destroy(void);
843 extern void ipsec_policy_g_init(void);
844 
845 extern int ipsec_alloc_table(ipsec_policy_head_t *, int, int, boolean_t,
846     netstack_t *);
847 extern void ipsec_polhead_init(ipsec_policy_head_t *, int);
848 extern void ipsec_polhead_destroy(ipsec_policy_head_t *);
849 extern void ipsec_polhead_free_table(ipsec_policy_head_t *);
850 extern mblk_t *ipsec_check_global_policy(mblk_t *, conn_t *, ipha_t *,
851 		    ip6_t *, boolean_t, netstack_t *);
852 extern mblk_t *ipsec_check_inbound_policy(mblk_t *, conn_t *, ipha_t *, ip6_t *,
853     boolean_t);
854 
855 extern boolean_t ipsec_in_to_out(mblk_t *, ipha_t *, ip6_t *);
856 extern void ipsec_log_policy_failure(int, char *, ipha_t *, ip6_t *, boolean_t,
857 		    netstack_t *);
858 extern boolean_t ipsec_inbound_accept_clear(mblk_t *, ipha_t *, ip6_t *);
859 extern int ipsec_conn_cache_policy(conn_t *, boolean_t);
860 extern mblk_t *ipsec_alloc_ipsec_out(netstack_t *);
861 extern mblk_t	*ipsec_attach_ipsec_out(mblk_t **, conn_t *, ipsec_policy_t *,
862     uint8_t, netstack_t *);
863 extern mblk_t	*ipsec_init_ipsec_out(mblk_t *, mblk_t **, conn_t *,
864     ipsec_policy_t *, uint8_t, netstack_t *);
865 struct ipsec_in_s;
866 extern ipsec_action_t *ipsec_in_to_out_action(struct ipsec_in_s *);
867 extern boolean_t ipsec_check_ipsecin_latch(struct ipsec_in_s *, mblk_t *,
868     struct ipsec_latch_s *, ipha_t *, ip6_t *, const char **, kstat_named_t **,
869     conn_t *);
870 extern void ipsec_latch_inbound(ipsec_latch_t *ipl, struct ipsec_in_s *ii);
871 
872 extern void ipsec_policy_free(ipsec_policy_t *, netstack_t *);
873 extern void ipsec_action_free(ipsec_action_t *);
874 extern void ipsec_polhead_free(ipsec_policy_head_t *, netstack_t *);
875 extern ipsec_policy_head_t *ipsec_polhead_split(ipsec_policy_head_t *,
876     netstack_t *);
877 extern ipsec_policy_head_t *ipsec_polhead_create(void);
878 extern ipsec_policy_head_t *ipsec_system_policy(netstack_t *);
879 extern ipsec_policy_head_t *ipsec_inactive_policy(netstack_t *);
880 extern void ipsec_swap_policy(ipsec_policy_head_t *, ipsec_policy_head_t *,
881     netstack_t *);
882 extern void ipsec_swap_global_policy(netstack_t *);
883 
884 extern int ipsec_clone_system_policy(netstack_t *);
885 extern ipsec_policy_t *ipsec_policy_create(ipsec_selkey_t *,
886     const ipsec_act_t *, int, int, uint64_t *, netstack_t *);
887 extern boolean_t ipsec_policy_delete(ipsec_policy_head_t *,
888     ipsec_selkey_t *, int, netstack_t *);
889 extern int ipsec_policy_delete_index(ipsec_policy_head_t *, uint64_t,
890     netstack_t *);
891 extern void ipsec_polhead_flush(ipsec_policy_head_t *, netstack_t *);
892 extern int ipsec_copy_polhead(ipsec_policy_head_t *, ipsec_policy_head_t *,
893     netstack_t *);
894 extern void ipsec_actvec_from_req(ipsec_req_t *, ipsec_act_t **, uint_t *,
895     netstack_t *);
896 extern void ipsec_actvec_free(ipsec_act_t *, uint_t);
897 extern int ipsec_req_from_head(ipsec_policy_head_t *, ipsec_req_t *, int);
898 extern mblk_t *ipsec_construct_inverse_acquire(sadb_msg_t *, sadb_ext_t **,
899     netstack_t *);
900 extern mblk_t *ip_wput_attach_policy(mblk_t *, ipha_t *, ip6_t *, ire_t *,
901     conn_t *, boolean_t, zoneid_t);
902 extern mblk_t	*ip_wput_ire_parse_ipsec_out(mblk_t *, ipha_t *, ip6_t *,
903     ire_t *, conn_t *, boolean_t, zoneid_t);
904 extern ipsec_policy_t *ipsec_find_policy(int, conn_t *,
905     struct ipsec_out_s *, ipsec_selector_t *, netstack_t *);
906 extern ipsid_t *ipsid_lookup(int, char *, netstack_t *);
907 extern boolean_t ipsid_equal(ipsid_t *, ipsid_t *);
908 extern void ipsid_gc(netstack_t *);
909 extern void ipsec_latch_ids(ipsec_latch_t *, ipsid_t *, ipsid_t *);
910 
911 extern void ipsec_config_flush(netstack_t *);
912 extern boolean_t ipsec_check_policy(ipsec_policy_head_t *, ipsec_policy_t *,
913     int);
914 extern void ipsec_enter_policy(ipsec_policy_head_t *, ipsec_policy_t *, int,
915     netstack_t *);
916 extern boolean_t ipsec_check_action(ipsec_act_t *, int *, netstack_t *);
917 
918 extern mblk_t *ipsec_out_tag(mblk_t *, mblk_t *, netstack_t *);
919 extern mblk_t *ipsec_in_tag(mblk_t *, mblk_t *, netstack_t *);
920 extern mblk_t *ip_copymsg(mblk_t *mp);
921 
922 extern void iplatch_free(ipsec_latch_t *, netstack_t *);
923 extern ipsec_latch_t *iplatch_create(void);
924 extern int ipsec_set_req(cred_t *, conn_t *, ipsec_req_t *);
925 
926 extern void ipsec_insert_always(avl_tree_t *tree, void *new_node);
927 
928 extern int32_t ipsec_act_ovhd(const ipsec_act_t *act);
929 
930 
931 extern boolean_t iph_ipvN(ipsec_policy_head_t *, boolean_t);
932 
933 /*
934  * Tunnel-support SPD functions and variables.
935  */
936 struct tun_s;	/* Defined in inet/tun.h. */
937 extern boolean_t ipsec_tun_inbound(mblk_t *, mblk_t **,  ipsec_tun_pol_t *,
938     ipha_t *, ip6_t *, ipha_t *, ip6_t *, int, netstack_t *);
939 extern mblk_t *ipsec_tun_outbound(mblk_t *, struct tun_s *, ipha_t *,
940     ip6_t *, ipha_t *, ip6_t *, int, netstack_t *);
941 extern void itp_free(ipsec_tun_pol_t *, netstack_t *);
942 extern ipsec_tun_pol_t *create_tunnel_policy(char *, int *, uint64_t *,
943     netstack_t *);
944 extern ipsec_tun_pol_t *get_tunnel_policy(char *, netstack_t *);
945 extern void itp_unlink(ipsec_tun_pol_t *, netstack_t *);
946 extern void itp_walk(void (*)(ipsec_tun_pol_t *, void *, netstack_t *),
947     void *, netstack_t *);
948 
949 extern ipsec_tun_pol_t *itp_get_byaddr_dummy(uint32_t *, uint32_t *,
950     int, netstack_t *);
951 
952 /*
953  * IPsec AH/ESP functions called from IP or the common SADB code in AH.
954  */
955 
956 extern void ipsecah_in_assocfailure(mblk_t *, char, ushort_t, char *,
957     uint32_t, void *, int, ipsecah_stack_t *);
958 extern void ipsecesp_in_assocfailure(mblk_t *, char, ushort_t, char *,
959     uint32_t, void *, int, ipsecesp_stack_t *);
960 extern void ipsecesp_send_keepalive(ipsa_t *);
961 
962 /*
963  * Algorithm management helper functions.
964  */
965 extern boolean_t ipsec_valid_key_size(uint16_t, ipsec_alginfo_t *);
966 
967 /*
968  * Per-socket policy, for now, takes precedence... this priority value
969  * ensures it.
970  */
971 #define	IPSEC_PRIO_SOCKET		0x1000000
972 
973 /* DDI initialization functions. */
974 extern	boolean_t    ipsecesp_ddi_init(void);
975 extern	boolean_t    ipsecah_ddi_init(void);
976 extern	boolean_t    keysock_ddi_init(void);
977 extern	boolean_t    spdsock_ddi_init(void);
978 
979 extern	void    ipsecesp_ddi_destroy(void);
980 extern	void    ipsecah_ddi_destroy(void);
981 extern	void	keysock_ddi_destroy(void);
982 extern	void    spdsock_ddi_destroy(void);
983 
984 /*
985  * AH- and ESP-specific functions that are called directly by other modules.
986  */
987 extern void ipsecah_fill_defs(struct sadb_x_ecomb *, netstack_t *);
988 extern void ipsecesp_fill_defs(struct sadb_x_ecomb *, netstack_t *);
989 extern void ipsecah_algs_changed(netstack_t *);
990 extern void ipsecesp_algs_changed(netstack_t *);
991 extern void ipsecesp_init_funcs(ipsa_t *);
992 extern void ipsecah_init_funcs(ipsa_t *);
993 extern ipsec_status_t ipsecah_icmp_error(mblk_t *);
994 extern ipsec_status_t ipsecesp_icmp_error(mblk_t *);
995 
996 /*
997  * Wrapper for putnext() to ipsec accelerated interface.
998  */
999 extern void ipsec_hw_putnext(queue_t *, mblk_t *);
1000 
1001 /*
1002  * spdsock functions that are called directly by IP.
1003  */
1004 extern void spdsock_update_pending_algs(netstack_t *);
1005 
1006 /*
1007  * IP functions that are called from AH and ESP.
1008  */
1009 extern boolean_t ipsec_outbound_sa(mblk_t *, uint_t);
1010 extern esph_t *ipsec_inbound_esp_sa(mblk_t *, netstack_t *);
1011 extern ah_t *ipsec_inbound_ah_sa(mblk_t *, netstack_t *);
1012 extern ipsec_policy_t *ipsec_find_policy_head(ipsec_policy_t *,
1013     ipsec_policy_head_t *, int, ipsec_selector_t *, netstack_t *);
1014 
1015 /*
1016  * IP dropper init/destroy.
1017  */
1018 void ip_drop_init(ipsec_stack_t *);
1019 void ip_drop_destroy(ipsec_stack_t *);
1020 
1021 /*
1022  * Common functions
1023  */
1024 extern boolean_t ip_addr_match(uint8_t *, int, in6_addr_t *);
1025 
1026 /*
1027  * AH and ESP counter types.
1028  */
1029 typedef uint32_t ah_counter;
1030 typedef uint32_t esp_counter;
1031 
1032 #endif /* _KERNEL */
1033 
1034 #ifdef	__cplusplus
1035 }
1036 #endif
1037 
1038 #endif	/* _INET_IPSEC_IMPL_H */
1039