xref: /illumos-gate/usr/src/lib/libc/port/threads/door_calls.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include "libc.h"
32 
33 #include <alloca.h>
34 #include <unistd.h>
35 #include <thread.h>
36 #include <pthread.h>
37 #include <stdio.h>
38 #include <errno.h>
39 #include <door.h>
40 #include <signal.h>
41 #include <ucred.h>
42 #include <sys/ucred.h>
43 
/* Default server-thread creation routine (defined at bottom of file). */
static door_server_func_t door_create_server;

/*
 * Global state -- the non-statics are accessed from the __door_return()
 * syscall wrapper.
 */
/* Serializes updates to door_server_func and the door_create_*_pid state. */
static mutex_t		door_state_lock = DEFAULTMUTEX;
/* Current server-thread creation hook; replaced via door_server_create(). */
door_server_func_t	*door_server_func = door_create_server;
/* Pid that most recently created a door; detects forkall() inheritance. */
pid_t			door_create_pid = 0;
/* Pid that started the global (non-private) server thread pool. */
static pid_t		door_create_first_pid = 0;
/* Pid that started the unref-upcall daemon thread. */
static pid_t		door_create_unref_pid = 0;

/*
 * The raw system call interfaces
 */
extern int __door_create(void (*)(void *, char *, size_t, door_desc_t *,
    uint_t), void *, uint_t);
extern int __door_return(caddr_t, size_t, door_return_desc_t *, caddr_t,
    size_t);
extern int __door_ucred(ucred_t *);
extern int __door_unref(void);
65 
66 /*
67  * We park the ourselves in the kernel to serve as the "caller" for
68  * unreferenced upcalls for this process.  If the call returns with
69  * EINTR (e.g., someone did a forkall), we repeat as long as we're still
70  * in the parent.  If the child creates an unref door it will create
71  * a new thread.
72  */
73 static void *
74 door_unref_func(void *arg)
75 {
76 	pid_t mypid = (pid_t)(uintptr_t)arg;
77 
78 	sigset_t fillset;
79 
80 	/* mask signals before diving into the kernel */
81 	(void) sigfillset(&fillset);
82 	(void) thr_sigsetmask(SIG_SETMASK, &fillset, NULL);
83 
84 	while (getpid() == mypid && __door_unref() && errno == EINTR)
85 		continue;
86 
87 	return (NULL);
88 }
89 
/*
 * door_create(3C): create a door descriptor whose server procedure f()
 * is invoked with `cookie' on each door invocation.  Returns the new
 * descriptor, or -1 with errno set on failure.
 *
 * Beyond the raw __door_create() syscall, this kicks off the
 * per-process helper threads as needed: the global server-thread pool
 * the first time a non-private door is created, a private pool for
 * DOOR_PRIVATE doors, and a daemon thread to field unreferenced
 * upcalls the first time a DOOR_UNREF[_MULTI] door is created.
 */
int
door_create(void (*f)(void *, char *, size_t, door_desc_t *, uint_t),
    void *cookie, uint_t flags)
{
	int d;

	int is_private = (flags & DOOR_PRIVATE);
	int is_unref = (flags & (DOOR_UNREF | DOOR_UNREF_MULTI));
	int do_create_first = 0;
	int do_create_unref = 0;

	ulwp_t *self = curthread;

	pid_t mypid;

	/* Doors cannot be created between vfork() and exec(). */
	if (self->ul_vfork) {
		errno = ENOTSUP;
		return (-1);
	}

	/*
	 * Doors are associated with the processes which created them.  In
	 * the face of forkall(), this gets quite complicated.  To simplify
	 * it somewhat, we include the call to __door_create() in a critical
	 * section, and figure out what additional actions to take while
	 * still in the critical section.
	 */
	enter_critical(self);
	if ((d = __door_create(f, cookie, flags)) < 0) {
		exit_critical(self);
		return (-1);
	}
	mypid = getpid();
	/*
	 * The unlocked reads are a fast-path check; any pid mismatch
	 * (first door ever, or first door since a forkall()) is
	 * re-examined under door_state_lock before acting on it.
	 */
	if (mypid != door_create_pid ||
	    (!is_private && mypid != door_create_first_pid) ||
	    (is_unref && mypid != door_create_unref_pid)) {

		lmutex_lock(&door_state_lock);
		door_create_pid = mypid;

		if (!is_private && mypid != door_create_first_pid) {
			do_create_first = 1;
			door_create_first_pid = mypid;
		}
		if (is_unref && mypid != door_create_unref_pid) {
			do_create_unref = 1;
			door_create_unref_pid = mypid;
		}
		lmutex_unlock(&door_state_lock);
	}
	exit_critical(self);

	if (do_create_unref) {
		/*
		 * Create an unref thread the first time we create an
		 * unref door for this process.  Create it as a daemon
		 * thread, so that it doesn't interfere with normal exit
		 * processing.
		 */
		(void) thr_create(NULL, 0, door_unref_func,
		    (void *)(uintptr_t)mypid, THR_DAEMON, NULL);
	}

	/*
	 * If this is the first door created in the process, or the door
	 * has a private pool, we need to kick off the thread pool now.
	 */
	if (do_create_first)
		(*door_server_func)(NULL);

	if (is_private) {
		door_info_t di;

		/* Private pool: hand the door's own info to the hook. */
		if (__door_info(d, &di) < 0)
			return (-1);
		(*door_server_func)(&di);
	}

	return (d);
}
170 
171 int
172 door_ucred(ucred_t **uc)
173 {
174 	ucred_t *ucp = *uc;
175 
176 	if (ucp == NULL) {
177 		ucp = _ucred_alloc();
178 		if (ucp == NULL)
179 			return (-1);
180 	}
181 
182 	if (__door_ucred(ucp) != 0) {
183 		if (*uc == NULL)
184 			ucred_free(ucp);
185 		return (-1);
186 	}
187 
188 	*uc = ucp;
189 
190 	return (0);
191 }
192 
193 int
194 door_cred(door_cred_t *dc)
195 {
196 	/*
197 	 * Ucred size is small and alloca is fast
198 	 * and cannot fail.
199 	 */
200 	ucred_t *ucp = alloca(ucred_size());
201 	int ret;
202 
203 	if ((ret = __door_ucred(ucp)) == 0) {
204 		dc->dc_euid = ucred_geteuid(ucp);
205 		dc->dc_ruid = ucred_getruid(ucp);
206 		dc->dc_egid = ucred_getegid(ucp);
207 		dc->dc_rgid = ucred_getrgid(ucp);
208 		dc->dc_pid = ucred_getpid(ucp);
209 	}
210 	return (ret);
211 }
212 
/*
 * door_return(3C): return results (data and/or descriptors) to the
 * client of the current door invocation, then block in the kernel
 * awaiting the next invocation on this thread's stack.  On success it
 * does not return to the caller; on error it returns -1 with errno
 * set.  Most of the work here is computing how much of this thread's
 * stack to offer to the kernel for the next request.
 */
int
door_return(char *data_ptr, size_t data_size,
    door_desc_t *desc_ptr, uint_t num_desc)
{
	caddr_t sp;
	size_t ssize;
	size_t reserve;
	ulwp_t *self = curthread;

	{
		/* Discover this thread's stack base and size. */
		stack_t s;
		if (thr_stksegment(&s) != 0) {
			errno = EINVAL;
			return (-1);
		}
		sp = s.ss_sp;
		ssize = s.ss_size;
	}

	if (!self->ul_door_noreserve) {
		/*
		 * When we return from the kernel, we must have enough stack
		 * available to handle the request.  Since the creator of
		 * the thread has control over its stack size, and larger
		 * stacks generally indicate bigger request queues, we
		 * use the heuristic of reserving 1/32nd of the stack size
		 * (up to the default stack size), with a minimum of 1/8th
		 * of MINSTACK.  Currently, this translates to:
		 *
		 *			_ILP32		_LP64
		 *	min resv	 512 bytes	1024 bytes
		 *	max resv	 32k bytes	 64k bytes
		 *
		 * This reservation can be disabled by setting
		 *	_THREAD_DOOR_NORESERVE=1
		 * in the environment, but shouldn't be.
		 */

#define	STACK_FRACTION		32
#define	MINSTACK_FRACTION	8

		if (ssize < (MINSTACK * (STACK_FRACTION/MINSTACK_FRACTION)))
			reserve = MINSTACK / MINSTACK_FRACTION;
		else if (ssize < DEFAULTSTACK)
			reserve = ssize / STACK_FRACTION;
		else
			reserve = DEFAULTSTACK / STACK_FRACTION;

#undef STACK_FRACTION
#undef MINSTACK_FRACTION

		if (ssize > reserve)
			ssize -= reserve;
		else
			ssize = 0;
	}

	/*
	 * Historically, the __door_return() syscall wrapper subtracted
	 * some "slop" from the stack pointer before trapping into the
	 * kernel.  We now do this here, so that ssize can be adjusted
	 * correctly.  Eventually, this should be removed, since it is
	 * unnecessary.  (note that TNF on x86 currently relies upon this
	 * idiocy)
	 */
#if defined(__sparc)
	reserve = SA(MINFRAME);
#elif defined(__x86)
	reserve = SA(512);
#else
#error need to define stack base reserve
#endif

#ifdef _STACK_GROWS_DOWNWARD
	sp -= reserve;
#else
#error stack does not grow downwards, routine needs update
#endif

	if (ssize > reserve)
		ssize -= reserve;
	else
		ssize = 0;

	/*
	 * Normally, the above will leave plenty of space in sp for a
	 * request.  Just in case some bozo overrides thr_stksegment() to
	 * return an uncommonly small stack size, we turn off stack size
	 * checking if there is less than 1k remaining.
	 */
#define	MIN_DOOR_STACK	1024
	if (ssize < MIN_DOOR_STACK)
		ssize = 0;

#undef MIN_DOOR_STACK

	/*
	 * We have to wrap the desc_* arguments for the syscall.  If there are
	 * no descriptors being returned, we can skip the wrapping.
	 */
	if (num_desc != 0) {
		door_return_desc_t d;

		d.desc_ptr = desc_ptr;
		d.desc_num = num_desc;
		return (__door_return(data_ptr, data_size, &d, sp, ssize));
	}
	return (__door_return(data_ptr, data_size, NULL, sp, ssize));
}
322 
323 /*
324  * Install a new server creation function.
325  */
326 door_server_func_t *
327 door_server_create(door_server_func_t *create_func)
328 {
329 	door_server_func_t *prev;
330 
331 	lmutex_lock(&door_state_lock);
332 	prev = door_server_func;
333 	door_server_func = create_func;
334 	lmutex_unlock(&door_state_lock);
335 
336 	return (prev);
337 }
338 
339 /*
340  * Create door server threads with cancellation(5) disabled.
341  */
342 static void *
343 door_create_func(void *arg)
344 {
345 	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
346 	(void) door_return(NULL, 0, NULL, 0);
347 
348 	return (arg);
349 }
350 
351 /*
352  * The default server thread creation routine.
353  */
354 /* ARGSUSED */
355 static void
356 door_create_server(door_info_t *dip)
357 {
358 	(void) thr_create(NULL, 0, door_create_func, NULL, THR_DETACHED, NULL);
359 	yield();	/* Gives server thread a chance to run */
360 }
361