/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *	syseventd - The system event daemon
 *
 *		This daemon dispatches event buffers received from the
 *		kernel to all interested SLM clients.  SLMs in turn
 *		deliver the buffers to their particular application
 *		clients.
 */
#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <thread.h>
#include <libsysevent.h>
#include <limits.h>
#include <locale.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>

#include "sysevent_signal.h"
#include "syseventd.h"
#include "message.h"

extern int insert_client(void *client, int client_type, int retry_limit);
extern void delete_client(int id);
extern void initialize_client_tbl(void);

extern struct sysevent_client *sysevent_client_tbl[];
extern mutex_t client_tbl_lock;

#define	DEBUG_LEVEL_FORK	9	/* will run in background at all */
					/* levels less than DEBUG_LEVEL_FORK */

int debug_level = 0;
char *root_dir = "";			/* Relative root for lock and door */

/* Maximum number of outstanding events dispatched */
#define	SE_EVENT_DISPATCH_CNT	100

static int upcall_door;			/* Kernel event door */
static int door_upcall_retval;		/* Kernel event posting return value */
static int fini_pending = 0;		/* fini pending flag */
static int deliver_buf = 0;		/* Current event buffer from kernel */
static int dispatch_buf = 0;		/* Current event buffer dispatched */
static sysevent_t **eventbuf;		/* Global array of event buffers */
static struct ev_completion *event_compq;	/* Event completion queue */
static mutex_t ev_comp_lock;		/* Event completion queue lock */
static mutex_t err_mutex;		/* error logging lock */
static mutex_t door_lock;		/* sync door return access */
static rwlock_t mod_unload_lock;	/* sync module unloading */
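
/*
 * eventbuf is managed as a ring of SE_EVENT_DISPATCH_CNT buffers:
 * door_upcall() copies each kernel upload into eventbuf[deliver_buf]
 * (producer index) and dispatch_message() consumes eventbuf[dispatch_buf]
 * (consumer index); sema_eventbuf and sema_dispatch, declared below,
 * keep the producer from overwriting undispatched buffers.
 */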

/* declarations and definitions for avoiding multiple daemons running */
#define	DAEMON_LOCK_FILE "/var/run/syseventd.lock"
char local_lock_file[PATH_MAX + 1];
static int hold_daemon_lock;
static int daemon_lock_fd;

/*
 * sema_eventbuf - guards against the global buffer eventbuf
 *	being written to before it has been dispatched to clients
 *
 * sema_dispatch - synchronizes between the kernel uploading thread
 *	(producer) and the userland dispatch_message thread (consumer).
 *
 * sema_resource - throttles outstanding event consumption.
 *
 * event_comp_cv - synchronizes threads waiting for the event completion queue
 *			to empty or become active.
 */
static sema_t sema_eventbuf, sema_dispatch, sema_resource;
static cond_t event_comp_cv;

/* Self-tuning concurrency level */
#define	MIN_CONCURRENCY_LEVEL	4
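/*
 * concurrency_level is advisory: syseventd_init() resets it to
 * MIN_CONCURRENCY_LEVEL and load_modules() raises it by one, via
 * thr_setconcurrency(), for each SLM delivery thread it creates.
 */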
static int concurrency_level = MIN_CONCURRENCY_LEVEL;

/* SLM defines */
#define	MODULE_SUFFIX	".so"
#define	EVENT_FINI	"slm_fini"
#define	EVENT_INIT	"slm_init"

#define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */

/* syslog message related */
static int logflag = 0;
static char *prog;

/* function prototypes */
static void door_upcall(void *cookie, char *args, size_t alen, door_desc_t *ddp,
	uint_t ndid);
static void dispatch_message(void);
static int dispatch(void);
static void event_completion_thr(void);
static void usage(void);

static void syseventd_init(void);
static void syseventd_fini(int sig);

static pid_t enter_daemon_lock(void);
static void exit_daemon_lock(void);

static void
usage() {
	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
	    "[-r <root_dir>]\n");
	(void) fprintf(stderr, "higher debug levels get progressively ");
	(void) fprintf(stderr, "more detailed debug information.\n");
	(void) fprintf(stderr, "syseventd will run in background if ");
	(void) fprintf(stderr, "run with a debug_level less than %d.\n",
	    DEBUG_LEVEL_FORK);
	exit(2);
}


/* common exit function which ensures releasing locks */
void
syseventd_exit(int status)
{
	syseventd_print(1, "exit status = %d\n", status);

	if (hold_daemon_lock) {
		exit_daemon_lock();
	}

	exit(status);
}


/*
 * hup_handler - SIGHUP handler.  SIGHUP is used to force a reload of
 *		 all SLMs.  During fini, events are drained from all
 *		 client event queues.  The events that have been consumed
 *		 by all clients are freed from the kernel event queue.
 *
 *		 Events that have not yet been delivered to all clients
 *		 are not freed and will be replayed after all SLMs have
 *		 been (re)loaded.
 *
 *		 After all client event queues have been drained, each
 *		 SLM client is unloaded.  The init phase will (re)load
 *		 each SLM and initiate event replay and delivery from
 *		 the kernel.
 */
/*ARGSUSED*/
static void
hup_handler(int sig)
{
	syseventd_err_print(SIGHUP_CAUGHT);
	(void) fflush(0);
	syseventd_fini(sig);
	syseventd_init();
	syseventd_err_print(DAEMON_RESTARTED);
	(void) fflush(0);
}

/*
 * Fault handler for other signals caught
 */
/*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
		case SIGINT:
		case SIGSTOP:
		case SIGTERM:
			/* Close kernel door */
			(void) door_revoke(upcall_door);

			/* Gracefully exit current event delivery threads */
			syseventd_fini(sig);

			(void) fflush(0);
			(void) se_signal_unblockall();
			syseventd_exit(1);
			/*NOTREACHED*/
		default:
			syseventd_err_print(FATAL_ERROR);
			(void) fflush(0);
	}
}

/*
 * Daemon parent process only.
 * The child process sends SIGUSR1 to indicate successful daemon
 * initialization.  This is the normal and expected exit path of the
 * daemon parent.
 */
/*ARGSUSED*/
static void
sigusr1(int sig)
{
	syseventd_exit(0);
}

static void
sigwait_thr()
{
	int	sig;
	int	err;
	sigset_t signal_set;

	for (;;) {
		syseventd_print(3, "sigwait thread waiting for signal\n");
		(void) sigfillset(&signal_set);
		err = sigwait(&signal_set, &sig);
		if (err) {
			syseventd_exit(2);
		}

		/*
		 * All signals remain blocked in every thread, so the
		 * handler runs to completion before the next sigwait.
		 */
		if (sig == SIGHUP) {
			hup_handler(sig);
		} else {
			flt_handler(sig);
		}
	}
	/* NOTREACHED */
}

static void
set_root_dir(char *dir)
{
	root_dir = malloc(strlen(dir) + 1);
	if (root_dir == NULL) {
		syseventd_err_print(INIT_ROOT_DIR_ERR, strerror(errno));
		syseventd_exit(2);
	}
	(void) strcpy(root_dir, dir);
}

int
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	int has_forked = 0;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}

	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	/* daemonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {

		sigset_t mask;

		(void) sigset(SIGUSR1, sigusr1);

		(void) sigemptyset(&mask);
		(void) sigaddset(&mask, SIGUSR1);
		(void) sigprocmask(SIG_BLOCK, &mask, NULL);

		if ((pid = fork()) == (pid_t)-1) {
			(void) fprintf(stderr,
			    "syseventd: fork failed - %s\n", strerror(errno));
			syseventd_exit(1);
		}

		if (pid != 0) {
			/*
			 * parent
			 * handshake with the daemon so that dependents
			 * of the syseventd service don't start up until
			 * the service is actually functional
			 */
			int status;
			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

			if (waitpid(pid, &status, 0) != pid) {
				/*
				 * child process signal indicating
				 * successful daemon initialization
				 */
				syseventd_exit(0);
			}
			/* child exited implying unsuccessful startup */
			syseventd_exit(1);
		}

		/* child */

		has_forked = 1;
		(void) sigset(SIGUSR1, SIG_DFL);
		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

		(void) chdir("/");
		(void) setsid();
		if (debug_level <= 1) {
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
		}
	}

	openlog("syseventd", LOG_PID, LOG_DAEMON);

	(void) mutex_init(&err_mutex, USYNC_THREAD, NULL);

	syseventd_print(8,
	    "syseventd started, debug level = %d\n", debug_level);

	/* only one instance of syseventd can run at a time */
	if ((pid = enter_daemon_lock()) != getpid()) {
		syseventd_print(1,
		    "event daemon pid %ld already running\n", pid);
		exit(3);
	}

	/* initialize semaphores and eventbuf */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
	for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
		eventbuf[i] = malloc(LOGEVENT_BUFSIZE);
		if (eventbuf[i] == NULL) {
			syseventd_print(1, "Unable to allocate event "
			    "buffers\n");
			exit(2);
		}
	}

	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;

	syseventd_print(8, "start the message thread running\n");

	/*
	 * Block all signals to all threads, including the main thread.
	 * The sigwait_thr thread will process any signals and initiate
	 * a graceful recovery if possible.
	 */
	if (se_signal_blockall() < 0) {
		syseventd_err_print(INIT_SIG_BLOCK_ERR);
		syseventd_exit(2);
	}

	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_err_print(INIT_THR_CREATE_ERR, strerror(errno));
		syseventd_exit(2);
	}

	setbuf(stdout, (char *)NULL);

	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();

	/* signal parent to indicate successful daemon initialization */
	if (has_forked) {
		if (kill(getppid(), SIGUSR1) != 0) {
			syseventd_err_print(
			    "signal to the parent failed - %s\n",
			    strerror(errno));
			syseventd_exit(2);
		}
	}

	syseventd_print(8, "Pausing\n");

	for (;;) {
		(void) pause();
	}
	/* NOTREACHED */
	return (0);
}

/*
 * door_upcall - called from the kernel via the kernel sysevent door
 *		to upload event(s).
 *
 *		This routine should never block.  If resources are
 *		not available to immediately accept the event buffer,
 *		EAGAIN is returned to the kernel.
 *
 *		Once resources are available, the kernel is notified
 *		via a modctl interface to resume event delivery to
 *		syseventd.
 */
/*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
	int rval;

	(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
	} else {
		/*
		 * Copy received message to local buffer.
		 */
		size_t size;
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;

		syseventd_print(2, "door_upcall: event %llx in eventbuf %d\n",
		    sysevent_get_seq(ev), deliver_buf);
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
		(void) sema_post(&sema_dispatch);
	}

	(void) mutex_unlock(&door_lock);

	/*
	 * Fill in the return value for door_return; the second call is a
	 * fallback that returns no data if the first call fails.
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}

/*
 * dispatch_message - dispatch message thread
 *			This thread spins until an event buffer is
 *			delivered from the kernel.
 *
 *			It will wait to dispatch an event to any clients
 *			until adequate resources are available to process
 *			the event buffer.
 */
static void
dispatch_message(void)
{
	int error;

	for (;;) {
		syseventd_print(3, "dispatch_message: thread started\n");
		/*
		 * Spin till a message comes
		 */
		while (sema_wait(&sema_dispatch) != 0) {
			syseventd_print(1,
			    "dispatch_message: sema_wait failed\n");
			(void) sleep(1);
		}

		syseventd_print(3, "dispatch_message: sema_dispatch\n");

		/*
		 * Wait for available resources
		 */
		while (sema_wait(&sema_resource) != 0) {
			syseventd_print(1, "dispatch_message: sema_wait "
			    "failed\n");
			(void) sleep(1);
		}

		syseventd_print(2, "dispatch_message: eventbuf %d\n",
		    dispatch_buf);

		/*
		 * Client dispatch
		 */
		do {
			error = dispatch();
		} while (error == EAGAIN);

		syseventd_print(2, "eventbuf %d dispatched\n", dispatch_buf);
		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;

		/*
		 * The kernel received a busy signal - kickstart the kernel
		 * delivery thread.  door_lock blocks the kernel, so we hold
		 * it for the shortest time possible.
		 */
		(void) mutex_lock(&door_lock);
		if (door_upcall_retval == EAGAIN && !fini_pending) {
			syseventd_print(3, "dispatch_message: retrigger "
			    "door_upcall_retval = %d\n",
			    door_upcall_retval);
			(void) modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH,
			    NULL, NULL, NULL, 0);
			door_upcall_retval = 0;
		}
		(void) mutex_unlock(&door_lock);
	}
	/* NOTREACHED */
}

/*
 * drain_eventq - Called to drain all pending events from the client's
 *		event queue.
 */
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;

		syseventd_print(4, "drain event 0X%llx for client %d\n",
		    sysevent_get_seq(d_pkg->ev), scp->client_num);

		if (d_pkg->completion_state == SE_NOT_DISPATCHED) {
			d_pkg->completion_status = status;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
		}

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}

/*
 * client_deliver_event_thr - Client delivery thread
 *				This thread will process any events on this
 *				client's eventq.
 */
static void
client_deliver_event_thr(void *arg)
{
	int flag, error, i;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {

			/*
			 * Client has been suspended or unloaded, go no further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);
		}

		/*
		 * Process events from the head of the eventq; the client
		 * lock is held entering the loop.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			(void) mutex_unlock(&scp->client_lock);

			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Attempt delivery, retrying until the client's
			 * retry limit is reached; on the final attempt,
			 * indicate that no further retries are allowed.
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				syseventd_print(9, "Deliver to module client "
				    "%s\n", mod->name);

				error = mod->deliver_event(ev, flag);

				/* Cannot allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Retry only if the client requested it */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);

			/* Update eventq pointer */
			if (scp->eventq != NULL) {
				scp->eventq = eventq->next;
				free(eventq);
				eventq = scp->eventq;
			} else {
				free(eventq);
				break;
			}

			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}
	}
}

/*
 * client_deliver_event - Client specific event delivery
 *			This routine will allocate and initialize the
 *			necessary per-client dispatch data.
 *
 *			If the eventq is not empty, it may be assumed that
 *			a delivery thread exists for this client and the
 *			dispatch data is appended to the eventq.
 *
 *			The dispatch package is freed by the event completion
 *			thread (event_completion_thr) and the eventq entry
 *			is freed by the event delivery thread.
 */
static struct event_dispatch_pkg *
client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
	sema_t *completion_sema)
{
	size_t ev_sz = sysevent_get_size(ev);
	struct event_dispatchq *newq, *tmp;
	struct event_dispatch_pkg *d_pkg;

	syseventd_print(3, "client_deliver_event: id 0x%llx size %d\n",
	    (longlong_t)sysevent_get_seq(ev), ev_sz);
	if (debug_level == 9) {
		se_print(stdout, ev);
	}

	/*
	 * Check for suspended client
	 */
	(void) mutex_lock(&scp->client_lock);
	if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/*
	 * Allocate a new dispatch package and eventq entry
	 */
	newq = (struct event_dispatchq *)malloc(
	    sizeof (struct event_dispatchq));
	if (newq == NULL) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	d_pkg = (struct event_dispatch_pkg *)malloc(
	    sizeof (struct event_dispatch_pkg));
	if (d_pkg == NULL) {
		free(newq);
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/* Initialize the dispatch package */
	d_pkg->scp = scp;
	d_pkg->retry_count = 0;
	d_pkg->completion_status = 0;
	d_pkg->completion_state = SE_NOT_DISPATCHED;
	d_pkg->completion_sema = completion_sema;
	d_pkg->ev = ev;
	newq->d_pkg = d_pkg;
	newq->next = NULL;

	if (scp->eventq != NULL) {
		/* Add entry to the end of the eventq */
		tmp = scp->eventq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = newq;
	} else {
		/* event queue empty, wakeup delivery thread */
		scp->eventq = newq;
		(void) cond_signal(&scp->client_cv);
	}
	(void) mutex_unlock(&scp->client_lock);

	return (d_pkg);
}

/*
 * event_completion_thr - Event completion thread.  This thread routine
 *			waits for all client delivery threads to complete
 *			delivery of a particular event.
 */
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);

				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			syseventd_print(3, "Cleaning up clients for event "
			    "0X%llx\n", eid.eid_seq);
			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num,
				    d_pkg->retry_count,
				    d_pkg->completion_status);

				free(d_pkg);
				ev_comp->dispatch_list = dispatchq->next;
				free(dispatchq);
				dispatchq = ev_comp->dispatch_list;
			}

			if (ok_to_free) {
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, NULL,
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event 0X%llx\n",
				    eid.eid_seq);
			}

			syseventd_print(2, "Event delivery complete for id "
			    "0X%llx\n", eid.eid_seq);

			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);

		syseventd_print(3, "No more events\n");
	}
}

/*
 * dispatch - Dispatch the current event buffer to all valid SLM clients.
 */
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;

	/* Check for module unload operation */
	if (rw_tryrdlock(&mod_unload_lock) != 0) {
		syseventd_print(2, "unload in progress abort delivery\n");
		(void) sema_post(&sema_eventbuf);
		(void) sema_post(&sema_resource);
		return (0);
	}

	syseventd_print(3, "deliver dispatch buffer %d\n", dispatch_buf);
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);
	syseventd_print(3, "deliver msg id: 0x%llx\n", eid.eid_seq);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Cannot allocate event completion buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);

	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		syseventd_print(1, "Cannot allocate new event buffer "
		    "for event id 0X%llx\n", eid.eid_seq);
		return (EAGAIN);
	}

	/*
	 * For long messages, copy additional data from the kernel
	 */
	if (ev_sz > LOGEVENT_BUFSIZE) {
		int ret = 0;

		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);

		for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid,
			    (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0)) == 0)
				break;
			else
				(void) sleep(1);
		}
		if (ret) {
			syseventd_print(1, "GET_DATA failed for 0X%llx:%llx\n",
			    eid.eid_ts, eid.eid_seq);
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
			return (EAGAIN);
		}
	} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
		(void) sema_post(&sema_eventbuf);
	}

	/*
	 * Deliver a copy of eventbuf to clients so
	 * eventbuf can be used for the next message
	 */
	for (i = 0; i < MAX_SLM; ++i) {

		/* Don't bother for suspended or unloaded clients */
		if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;

		/*
		 * Allocate event dispatch queue entry.  All queue entries
		 * are freed by the event completion thread as client
		 * delivery completes.
		 */
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Cannot allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
		}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Cannot allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
		}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
	ev_comp->ev = new_ev;

	(void) mutex_lock(&ev_comp_lock);

	if (event_compq == NULL) {
		syseventd_print(3, "Wakeup event completion thread for "
		    "id 0X%llx\n", eid.eid_seq);
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {
		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
		syseventd_print(3, "event added to completion queue for "
		    "id 0X%llx\n", eid.eid_seq);
	}
	(void) mutex_unlock(&ev_comp_lock);
	(void) rw_unlock(&mod_unload_lock);

	return (0);
}

#define	MODULE_DIR_HW	"/usr/platform/%s/lib/sysevent/modules/"
#define	MODULE_DIR_GEN	"/usr/lib/sysevent/modules/"
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][MAXPATHLEN];

static char *
dir_num2name(int dirnum)
{
	char infobuf[MAXPATHLEN];

	if (dirnum >= MOD_DIR_NUM)
		return (NULL);

	if (dirname[0][0] == '\0') {
		if (sysinfo(SI_PLATFORM, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[0], sizeof (dirname[0]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[0])) {
			syseventd_print(1, "dir_num2name: "
			    "platform name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		if (sysinfo(SI_MACHINE, infobuf, MAXPATHLEN) == -1) {
			syseventd_print(1, "dir_num2name: "
			    "sysinfo error %s\n", strerror(errno));
			return (NULL);
		} else if (snprintf(dirname[1], sizeof (dirname[1]),
		    MODULE_DIR_HW, infobuf) >= sizeof (dirname[1])) {
			syseventd_print(1, "dir_num2name: "
			    "machine name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		(void) strcpy(dirname[2], MODULE_DIR_GEN);
	}

	return (dirname[dirnum]);
}

/*
 * load_modules - Load modules found in the common syseventd module directories
 *		Modules that do not provide valid interfaces are rejected.
 */
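/*
 * An SLM is accepted only if it exports the entry points named by
 * EVENT_INIT ("slm_init") and EVENT_FINI ("slm_fini"), and if slm_init()
 * returns a struct slm_mod_ops whose major_version matches
 * SE_MAJOR_VERSION; the ops supply the module's retry_limit and its
 * deliver_event() handler.
 */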
static void
load_modules(char *dirname)
{
	int client_id;
	DIR *mod_dir;
	module_t *mod;
	struct dirent *entp;
	struct slm_mod_ops *mod_ops;
	struct sysevent_client *scp;

	if (dirname == NULL)
		return;

	/* Return silently if module directory does not exist */
	if ((mod_dir = opendir(dirname)) == NULL) {
		syseventd_print(1, "Unable to open module directory %s: %s\n",
		    dirname, strerror(errno));
		return;
	}

	syseventd_print(3, "loading modules from %s\n", dirname);

	/*
	 * Go through directory, looking for files ending with .so
	 */
	while ((entp = readdir(mod_dir)) != NULL) {
		void *dlh, *f;
		char *tmp, modpath[MAXPATHLEN];

		if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
		    (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
			continue;
		}

		if (snprintf(modpath, sizeof (modpath), "%s%s",
		    dirname, entp->d_name) >= sizeof (modpath)) {
			syseventd_err_print(INIT_PATH_ERR, modpath);
			continue;
		}
		if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
			syseventd_err_print(LOAD_MOD_DLOPEN_ERR,
			    modpath, dlerror());
			continue;
		} else if ((f = dlsym(dlh, EVENT_INIT)) == NULL) {
			syseventd_err_print(LOAD_MOD_NO_INIT,
			    modpath, dlerror());
			(void) dlclose(dlh);
			continue;
		}

		mod = malloc(sizeof (*mod));
		if (mod == NULL) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod",
			    strerror(errno));
			(void) dlclose(dlh);
			continue;
		}

		mod->name = strdup(entp->d_name);
		if (mod->name == NULL) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "mod->name",
			    strerror(errno));
			(void) dlclose(dlh);
			free(mod);
			continue;
		}

		mod->dlhandle = dlh;
		mod->event_mod_init = (struct slm_mod_ops *(*)())f;

		/* load in other module functions */
		mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
		if (mod->event_mod_fini == NULL) {
			syseventd_err_print(LOAD_MOD_DLSYM_ERR, mod->name,
			    dlerror());
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		/* Call module init routine */
		if ((mod_ops = mod->event_mod_init()) == NULL) {
			syseventd_err_print(LOAD_MOD_EINVAL, mod->name);
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		if (mod_ops->major_version != SE_MAJOR_VERSION) {
			syseventd_err_print(LOAD_MOD_VERSION_MISMATCH,
			    mod->name, SE_MAJOR_VERSION,
			    mod_ops->major_version);
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		mod->deliver_event = mod_ops->deliver_event;
		/* Add module entry to client list */
		if ((client_id = insert_client((void *)mod, SLM_CLIENT,
		    (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
		    mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
			    strerror(errno));
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		scp = sysevent_client_tbl[client_id];
		++concurrency_level;
		(void) thr_setconcurrency(concurrency_level);
		if (thr_create(NULL, 0,
		    (void *(*)(void *))client_deliver_event_thr,
		    (void *)scp, THR_BOUND, &scp->tid) != 0) {
			syseventd_err_print(LOAD_MOD_ALLOC_ERR, "insert_client",
			    strerror(errno));
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		scp->client_flags |= SE_CLIENT_THR_RUNNING;

		syseventd_print(3, "loaded module %s\n", entp->d_name);
	}

	(void) closedir(mod_dir);
	syseventd_print(3, "modules loaded\n");
}

/*
 * unload_modules - modules are unloaded prior to graceful shutdown or
 *			before restarting the daemon upon receipt of
 *			SIGHUP.
 */
static void
unload_modules(int sig)
{
	int			i, count, done;
	module_t		*mod;
	struct sysevent_client	*scp;

	/*
	 * unload modules that are ready, skip those that have not
	 * drained their event queues.
	 */
	count = done = 0;
	while (done < MAX_SLM) {
		/* Don't wait indefinitely for unresponsive clients */
		if (sig != SIGHUP && count > SE_TIMEOUT) {
			break;
		}

		done = 0;

		/* Shutdown clients */
		for (i = 0; i < MAX_SLM; ++i) {
			scp = sysevent_client_tbl[i];
			if (mutex_trylock(&scp->client_lock) == 0) {
				if (scp->client_type != SLM_CLIENT ||
				    scp->client_data == NULL) {
					(void) mutex_unlock(&scp->client_lock);
					done++;
					continue;
				}
			} else {
				syseventd_print(3, "Skipping unload of "
				    "client %d: client locked\n",
				    scp->client_num);
				continue;
			}

			/*
			 * Drain the eventq and wait for delivery thread to
			 * cleanly exit
			 */
			drain_eventq(scp, EAGAIN);
			(void) cond_signal(&scp->client_cv);
			(void) mutex_unlock(&scp->client_lock);
			(void) thr_join(scp->tid, NULL, NULL);

			/*
			 * It is now safe to unload the module
			 */
			mod = (module_t *)scp->client_data;
			syseventd_print(2, "Unload %s\n", mod->name);
			mod->event_mod_fini();
			(void) dlclose(mod->dlhandle);
			free(mod->name);
			(void) mutex_lock(&client_tbl_lock);
			delete_client(i);
			(void) mutex_unlock(&client_tbl_lock);
			++done;
		}
		++count;
		(void) sleep(1);
	}

	/*
	 * Wait for event completions
	 */
	syseventd_print(2, "waiting for event completions\n");
	(void) mutex_lock(&ev_comp_lock);
	while (event_compq != NULL) {
		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
	}
	(void) mutex_unlock(&ev_comp_lock);
}

/*
 * syseventd_init - Called at daemon (re)start-up time to load modules
 *			and kickstart the kernel delivery engine.
 */
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	syseventd_print(8, "Create a door for kernel upcalls\n");
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_door_file);
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print(INIT_OPEN_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print(INIT_CREATE_DOOR_ERR, strerror(errno));
		syseventd_exit(5);
	}

	(void) fdetach(local_door_file);
retry:
	if (fattach(upcall_door, local_door_file) != 0) {
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print(INIT_FATTACH_ERR, strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print(INIT_DOOR_NAME_ERR, strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print(KERNEL_REPLAY_ERR, strerror(errno));
		syseventd_exit(7);
	}
}

/*
 * syseventd_fini - shut down daemon, but do not exit
 */
static void
syseventd_fini(int sig)
{
	/*
	 * Indicate that event queues should be drained and no
	 * additional events be accepted
	 */
	fini_pending = 1;

	/* Close the kernel event door to halt delivery */
	(void) door_revoke(upcall_door);

	syseventd_print(1, "Unloading modules\n");
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);
}

/*
 * enter_daemon_lock - lock the daemon file lock
 *
 * Use an advisory lock to ensure that only one daemon process is active
 * in the system at any point in time.  If the lock is held by another
 * process, do not block but return the pid owner of the lock to the
 * caller immediately.  The lock is cleared if the holding daemon process
 * exits for any reason even if the lock file remains, so the daemon can
 * be restarted if necessary.  The lock file is DAEMON_LOCK_FILE.
 */
static pid_t
enter_daemon_lock(void)
{
	struct flock	lock;

	syseventd_print(8, "enter_daemon_lock: lock file = %s\n",
	    DAEMON_LOCK_FILE);

	if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
	    root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
		syseventd_err_print(INIT_PATH_ERR, local_lock_file);
		syseventd_exit(8);
	}
	daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
	if (daemon_lock_fd < 0) {
		syseventd_err_print(INIT_LOCK_OPEN_ERR,
		    local_lock_file, strerror(errno));
		syseventd_exit(8);
	}

	lock.l_type = F_WRLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
			syseventd_err_print(INIT_LOCK_ERR,
			    local_lock_file, strerror(errno));
			exit(2);
		}
		return (lock.l_pid);
	}
	hold_daemon_lock = 1;

	return (getpid());
}

/*
 * exit_daemon_lock - release the daemon file lock
 */
static void
exit_daemon_lock(void)
{
	struct flock lock;

	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		syseventd_err_print(INIT_UNLOCK_ERR,
		    local_lock_file, strerror(errno));
	}

	if (close(daemon_lock_fd) == -1) {
		syseventd_err_print(INIT_LOCK_CLOSE_ERR,
		    local_lock_file, strerror(errno));
		exit(-1);
	}
}

/*
 * syseventd_err_print - print error messages to the terminal if not
 *			yet daemonized, or to syslog afterwards.
 */
/*PRINTFLIKE1*/
void
syseventd_err_print(char *message, ...)
{
	va_list ap;

	(void) mutex_lock(&err_mutex);
	va_start(ap, message);

	if (logflag) {
		(void) vsyslog(LOG_ERR, message, ap);
	} else {
		(void) fprintf(stderr, "%s: ", prog);
		(void) vfprintf(stderr, message, ap);
	}
	va_end(ap);
	(void) mutex_unlock(&err_mutex);
}

/*
 * syseventd_print - print messages to the terminal or to syslog.
 *			The following debug levels are implemented:
 *
 * 1 - transient errors that do not affect normal program flow
 * 2 - upcall/dispatch interaction
 * 3 - program flow trace as each message goes through the daemon
 * 8 - all the nitty-gritty details of startup and shutdown
 * 9 - very verbose event flow tracing (no daemonization of syseventd)
 */
/*PRINTFLIKE2*/
void
syseventd_print(int level, char *message, ...)
{
	va_list ap;
	static int newline = 1;

	if (level > debug_level) {
		return;
	}

	(void) mutex_lock(&err_mutex);
	va_start(ap, message);
	if (logflag) {
		(void) syslog(LOG_DEBUG, "%s[%ld]: ",
		    prog, getpid());
		(void) vsyslog(LOG_DEBUG, message, ap);
	} else {
		/* Prefix with the program name only at the start of a line */
		if (newline) {
			(void) fprintf(stdout, "%s[%ld]: ",
			    prog, getpid());
		}
		(void) vfprintf(stdout, message, ap);
	}
	if (message[strlen(message)-1] == '\n') {
		newline = 1;
	} else {
		newline = 0;
	}
	va_end(ap);
	(void) mutex_unlock(&err_mutex);
}
1670