/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/mmu.h>
#include <sys/systm.h>
#include <sys/trap.h>
#include <sys/machtrap.h>
#include <sys/vtrace.h>
#include <sys/prsystm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/fpu/fpusystm.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/simulate.h>
#include <sys/ftrace.h>
#include <sys/ontrap.h>
#include <sys/kcpc.h>
#include <sys/kobj.h>
#include <sys/procfs.h>
#include <sys/sun4asi.h>
#include <sys/sdt.h>
#include <sys/fpras.h>

#ifdef  TRAPTRACE
#include <sys/traptrace.h>
#endif

int tudebug = 0;
static int tudebugbpt = 0;
static int tudebugfpe = 0;

static int alignfaults = 0;

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;
#else
#define	lodebug	0
#endif /* defined(TRAPDEBUG) || defined(lint) */

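/*
 * The interfaces declared below with #pragma weak are optional on a
 * given platform; callers test for their presence at run time (e.g.
 * &vis1_partial_support != NULL) before invoking them.
 */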
int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
#pragma weak vis1_partial_support

void showregs(unsigned, struct regs *, caddr_t, uint_t);
#pragma weak showregs

void trap_async_hwerr(void);
#pragma weak trap_async_hwerr

void trap_async_berr_bto(int, struct regs *);
#pragma weak trap_async_berr_bto

static enum seg_rw get_accesstype(struct regs *);
static int nfload(struct regs *, int *);
static int swap_nc(struct regs *, int);
static int ldstub_nc(struct regs *, int);
void	trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
void	trap_rtt(void);

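/*
 * Stash the trap state in curthread and panic with a "BAD TRAP"
 * message; used for kernel traps that cannot be resolved.
 */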
86 die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
87 {
88 	struct panic_trap_info ti;
89 
90 #ifdef TRAPTRACE
91 	TRAPTRACE_FREEZE;
92 #endif
93 
94 	ti.trap_regs = rp;
95 	ti.trap_type = type;
96 	ti.trap_addr = addr;
97 	ti.trap_mmu_fsr = mmu_fsr;
98 
99 	curthread->t_panic_trap = &ti;
100 
101 	if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
102 		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
103 		    "occurred in module \"%s\" due to %s",
104 		    type, (void *)rp, (void *)addr, mmu_fsr,
105 		    mod_containing_pc((caddr_t)rp->r_pc),
106 		    addr < (caddr_t)PAGESIZE ?
107 		    "a NULL pointer dereference" :
108 		    "an illegal access to a user address");
109 	} else {
110 		panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
111 		    type, (void *)rp, (void *)addr, mmu_fsr);
112 	}
113 
114 	return (0);	/* avoid optimization of restore in call's delay slot */
115 }
116 
117 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
118 int	ill_calls;
119 #endif
120 
121 /*
122  * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
123  * are the "strong" prefetches (fcn=20-23).  But we check for all flavors of
124  * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
125  */
126 #define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)
127 
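/*
 * The decode macros below match fixed opcode fields of the SPARC
 * instruction encoding; IS_FLOAT tests bit 24, which distinguishes the
 * FP forms within the load/store group, and IS_STORE tests the store
 * bit (bit 21) of a load/store opcode.
 */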
#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)
#define	IS_STORE(i)	(((i) >> 21) & 1)

/*
 * Called from the trap handler when a processor trap occurs.
 */
/*VARARGS2*/
void
trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	struct machpcb *mpcb = NULL;
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int stepped = 0;
	greg_t oldpc;
	int mstate;
	char *badaddr;
	faultcode_t res;
	enum fault_type fault_type;
	enum seg_rw rw;
	uintptr_t lofault;
	int instr;
	int iskernel;
	int watchcode;
	int watchpage;
	extern faultcode_t pagefault(caddr_t, enum fault_type,
	    enum seg_rw, int);

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

#ifdef SF_ERRATA_23 /* call causes illegal-insn */
	ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
	    (type == T_UNIMP_INSTR));
#else
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
#endif /* SF_ERRATA_23 */

	if (USERMODE(rp->r_tstate) || (type & T_USER)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		type |= T_USER;
		ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
		    (type == (T_SYS_RTT_ALIGN | T_USER)) ||
		    lwp->lwp_regs == rp);
		mpcb = lwptompcb(lwp);
		switch (type) {
		case T_WIN_OVERFLOW + T_USER:
		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_DATA_MMU_MISS + T_USER:
			mstate = LMS_DFAULT;
			break;
		case T_INSTR_MMU_MISS + T_USER:
			mstate = LMS_TFAULT;
			break;
		default:
			mstate = LMS_TRAP;
			break;
		}
		/* Kernel probe */
		TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		    tnf_microstate, state, (char)mstate);
		mstate = new_mstate(curthread, mstate);
		siginfo.si_signo = 0;
		stepped =
		    lwp->lwp_pcb.pcb_step != STEP_NONE &&
		    ((oldpc = rp->r_pc), prundostep()) &&
		    mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
		/* this assignment must not precede call to prundostep() */
		oldpc = rp->r_pc;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_trap_handler_enter:type %x", type);

#ifdef	F_DEFERRED
	/*
	 * Take any pending floating point exceptions now.
	 * If the floating point unit has an exception to handle,
	 * just return to user-level to let the signal handler run.
	 * The instruction that got us to trap() will be reexecuted on
	 * return from the signal handler and we will trap to here again.
	 * This is necessary to disambiguate simultaneous traps which
	 * happen when a floating-point exception is pending and a
	 * machine fault is incurred.
	 */
	if (type & USER) {
		/*
		 * FP_TRAPPED is set only by sendsig() when it copies
		 * out the floating-point queue for the signal handler.
		 * It is set there so we can test it here and in syscall().
		 */
		mpcb->mpcb_flags &= ~FP_TRAPPED;
		syncfpu();
		if (mpcb->mpcb_flags & FP_TRAPPED) {
			/*
			 * trap() may have been called recursively and may
			 * have stopped the process, so do single step
			 * support for /proc.
			 */
			mpcb->mpcb_flags &= ~FP_TRAPPED;
			goto out;
		}
	}
#endif
	switch (type) {
		case T_DATA_MMU_MISS:
		case T_INSTR_MMU_MISS + T_USER:
		case T_DATA_MMU_MISS + T_USER:
		case T_DATA_PROT + T_USER:
		case T_AST + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
		case T_FLUSH_PCB + T_USER:
		case T_FLUSHW + T_USER:
			break;

		default:
			FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
			    (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
			break;
	}

	switch (type) {

	default:
		/*
		 * Check for user software trap.
		 */
		if (type & T_USER) {
			if (tudebug)
				showregs(type, rp, (caddr_t)0, 0);
			if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGILL;
				siginfo.si_code  = ILL_ILLTRP;
				siginfo.si_addr  = (caddr_t)rp->r_pc;
				siginfo.si_trapno = type &~ T_USER;
				fault = FLTILL;
				break;
			}
		}
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_ALIGNMENT:	/* supv alignment error */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_INSTR_EXCEPTION:		/* sys instruction access exception */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, mmu_fsr);
		/*NOTREACHED*/

	case T_INSTR_MMU_MISS:		/* sys instruction mmu miss */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_DATA_EXCEPTION:		/* system data access exception */
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_RANGE:
			/*
			 * This happens when we attempt to dereference an
			 * address in the address hole.  If t_ontrap is set,
			 * then break and fall through to T_DATA_MMU_MISS /
			 * T_DATA_PROT case below.  If lofault is set, then
			 * honour it (perhaps the user gave us a bogus
			 * address in the hole to copyin from or copyout to?)
			 */

			if (curthread->t_ontrap != NULL)
				break;

			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_PRIV:
			/*
			 * This can happen if we access ASI_USER from a kernel
			 * thread.  To support pxfs, we need to honor lofault if
			 * we're doing a copyin/copyout from a kernel thread.
			 */

			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			if (curthread->t_lofault) {
				if (lodebug) {
					showregs(type, rp, addr, 0);
					traceback((caddr_t)rp->r_sp);
				}
				rp->r_g1 = EFAULT;
				rp->r_pc = curthread->t_lofault;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		default:
			if (nfload(rp, NULL))
				goto cleanup;
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			(void) die(type, rp, addr, mmu_fsr);
			/*NOTREACHED*/

		case FT_NFO:
			break;
		}
		/* fall into ... */

	case T_DATA_MMU_MISS:		/* system data mmu miss */
	case T_DATA_PROT:		/* system data protection fault */
		if (nfload(rp, &instr))
			goto cleanup;

		/*
		 * If we're under on_trap() protection (see <sys/ontrap.h>),
		 * set ot_trap and return from the trap to the trampoline.
		 */
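		/*
		 * For reference, a consumer of this protection looks
		 * roughly like the sketch below (not taken from this
		 * file; see on_trap(9F) and <sys/ontrap.h> for the real
		 * interface):
		 *
		 *	on_trap_data_t otd;
		 *
		 *	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		 *		... access that may fault ...
		 *	}
		 *	no_trap();
		 */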
		if (curthread->t_ontrap != NULL) {
			on_trap_data_t *otp = curthread->t_ontrap;

			TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
			    "C_trap_handler_exit");
			TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");

			if (otp->ot_prot & OT_DATA_ACCESS) {
				otp->ot_trap |= OT_DATA_ACCESS;
				rp->r_pc = otp->ot_trampoline;
				rp->r_npc = rp->r_pc + 4;
				goto cleanup;
			}
		}
		lofault = curthread->t_lofault;
		curthread->t_lofault = 0;

		mstate = new_mstate(curthread, LMS_KFAULT);

		switch (type) {
		case T_DATA_PROT:
			fault_type = F_PROT;
			rw = S_WRITE;
			break;
		case T_INSTR_MMU_MISS:
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;
		case T_DATA_MMU_MISS:
		case T_DATA_EXCEPTION:
			/*
			 * The hardware doesn't update the sfsr on mmu
			 * misses so it is not easy to find out whether
			 * the access was a read or a write so we need
			 * to decode the actual instruction.
			 */
			fault_type = F_INVAL;
			rw = get_accesstype(rp);
			break;
		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}
		/*
		 * We determine if access was done to kernel or user
		 * address space.  The addr passed into trap is really the
		 * tag access register.
		 */
		iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
		addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);

		res = pagefault(addr, fault_type, rw, iskernel);
		if (!iskernel && res == FC_NOMAP &&
		    addr < p->p_usrstack && grow(addr))
			res = 0;

		(void) new_mstate(curthread, mstate);

		/*
		 * Restore lofault.  If we resolved the fault, exit.
		 * If we didn't and lofault wasn't set, die.
		 */
		curthread->t_lofault = lofault;

		if (res == 0)
			goto cleanup;

		if (IS_PREFETCH(instr)) {
			/* skip prefetch instructions in kernel-land */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
		}

		if ((lofault == 0 || lodebug) &&
		    (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
			addr = badaddr;
		if (lofault == 0)
			(void) die(type, rp, addr, 0);
		/*
		 * Cannot resolve fault.  Return to lofault.
		 */
		if (lodebug) {
			showregs(type, rp, addr, 0);
			traceback((caddr_t)rp->r_sp);
		}
		if (FC_CODE(res) == FC_OBJERR)
			res = FC_ERRNO(res);
		else
			res = EFAULT;
		rp->r_g1 = res;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = curthread->t_lofault + 4;
		goto cleanup;

	case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = (caddr_t)rp->r_pc;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
		    SEGV_ACCERR : SEGV_MAPERR;
		fault = FLTBOUNDS;
		break;

	case T_WIN_OVERFLOW + T_USER:	/* window overflow in ??? */
	case T_WIN_UNDERFLOW + T_USER:	/* window underflow in ??? */
	case T_SYS_RTT_PAGE + T_USER:	/* window underflow in user_rtt */
	case T_INSTR_MMU_MISS + T_USER:	/* user instruction mmu miss */
	case T_DATA_MMU_MISS + T_USER:	/* user data mmu miss */
	case T_DATA_PROT + T_USER:	/* user data protection fault */
		switch (type) {
		case T_INSTR_MMU_MISS + T_USER:
			addr = (caddr_t)rp->r_pc;
			fault_type = F_INVAL;
			rw = S_EXEC;
			break;

		case T_DATA_MMU_MISS + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			/*
			 * The hardware doesn't update the sfsr on mmu misses
			 * so it is not easy to find out whether the access
			 * was a read or a write so we need to decode the
			 * actual instruction.  XXX BUGLY HW
			 */
			rw = get_accesstype(rp);
			break;

		case T_DATA_PROT + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_PROT;
			rw = S_WRITE;
			break;

		case T_WIN_OVERFLOW + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_WRITE;
			break;

		case T_WIN_UNDERFLOW + T_USER:
		case T_SYS_RTT_PAGE + T_USER:
			addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
			fault_type = F_INVAL;
			rw = S_READ;
			break;

		default:
			cmn_err(CE_PANIC, "trap: unknown type %x", type);
			break;
		}

		/*
		 * If we are single stepping do not call pagefault
		 */
		if (stepped) {
			res = FC_NOMAP;
		} else {
			caddr_t vaddr = addr;
			size_t sz;
			int ta;

			ASSERT(!(curthread->t_flag & T_WATCHPT));
			watchpage = (pr_watch_active(p) &&
			    type != T_WIN_OVERFLOW + T_USER &&
			    type != T_WIN_UNDERFLOW + T_USER &&
			    type != T_SYS_RTT_PAGE + T_USER &&
			    pr_is_watchpage(addr, rw));

			if (!watchpage ||
			    (sz = instr_size(rp, &vaddr, rw)) <= 0)
				/* EMPTY */;
			else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
			    sz, NULL, rw)) != 0) {
				if (ta) {
					do_watch_step(vaddr, sz, rw,
					    watchcode, rp->r_pc);
					fault_type = F_INVAL;
				} else {
					bzero(&siginfo,	sizeof (siginfo));
					siginfo.si_signo = SIGTRAP;
					siginfo.si_code = watchcode;
					siginfo.si_addr = vaddr;
					siginfo.si_trapafter = 0;
					siginfo.si_pc = (caddr_t)rp->r_pc;
					fault = FLTWATCH;
					break;
				}
			} else {
				if (rw != S_EXEC &&
				    pr_watch_emul(rp, vaddr, rw))
					goto out;
				do_watch_step(vaddr, sz, rw, 0, 0);
				fault_type = F_INVAL;
			}

			if (pr_watch_active(p) &&
			    (type == T_WIN_OVERFLOW + T_USER ||
			    type == T_WIN_UNDERFLOW + T_USER ||
			    type == T_SYS_RTT_PAGE + T_USER)) {
				int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
				if (copy_return_window(dotwo))
					goto out;
				fault_type = F_INVAL;
			}

			res = pagefault(addr, fault_type, rw, 0);

			/*
			 * If pagefault() succeeded, we're done.
			 * Otherwise try to grow the stack automatically.
			 */
			if (res == 0 ||
			    (res == FC_NOMAP &&
			    type != T_INSTR_MMU_MISS + T_USER &&
			    addr < p->p_usrstack &&
			    grow(addr))) {
				int ismem = prismember(&p->p_fltmask, FLTPAGE);

				/*
				 * instr_size() is used to get the exact
				 * address of the fault, instead of the
				 * page of the fault. Unfortunately it is
				 * very slow, and this is an important
				 * code path. Don't call it unless
				 * correctness is needed, i.e. if FLTPAGE
				 * is set, or we're profiling.
				 */

				if (curthread->t_rprof != NULL || ismem)
					(void) instr_size(rp, &addr, rw);

				lwp->lwp_lastfault = FLTPAGE;
				lwp->lwp_lastfaddr = addr;

				if (ismem) {
					bzero(&siginfo, sizeof (siginfo));
					siginfo.si_addr = addr;
					(void) stop_on_fault(FLTPAGE, &siginfo);
				}
				goto out;
			}

			if (type != (T_INSTR_MMU_MISS + T_USER)) {
				/*
				 * check for non-faulting loads, also
				 * fetch the instruction to check for
				 * flush
				 */
				if (nfload(rp, &instr))
					goto out;

				/* skip userland prefetch instructions */
				if (IS_PREFETCH(instr)) {
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}

				/*
				 * check if the instruction was a
				 * flush.  ABI allows users to specify
				 * an illegal address on the flush
				 * instruction so we simply return in
				 * this case.
				 *
				 * NB: the hardware should set a bit
				 * indicating this trap was caused by
				 * a flush instruction.  Instruction
				 * decoding is bugly!
				 */
				if (IS_FLUSH(instr)) {
					/* skip the flush instruction */
					rp->r_pc = rp->r_npc;
					rp->r_npc += 4;
					goto out;
					/*NOTREACHED*/
				}
			} else if (res == FC_PROT) {
				report_stack_exec(p, addr);
			}

			if (tudebug)
				showregs(type, rp, addr, 0);
		}

		/*
		 * In the case where both pagefault and grow fail,
		 * set the code to the value provided by pagefault.
		 */
		(void) instr_size(rp, &addr, rw);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		if (FC_CODE(res) == FC_OBJERR) {
			siginfo.si_errno = FC_ERRNO(res);
			if (siginfo.si_errno != EINTR) {
				siginfo.si_signo = SIGBUS;
				siginfo.si_code = BUS_OBJERR;
				fault = FLTACCESS;
			}
		} else { /* FC_NOMAP || FC_PROT */
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = (res == FC_NOMAP) ?
			    SEGV_MAPERR : SEGV_ACCERR;
			fault = FLTBOUNDS;
		}
		/*
		 * If this is the culmination of a single-step,
		 * reset the addr, code, signal and fault to
		 * indicate a hardware trace trap.
		 */
		if (stepped) {
			pcb_t *pcb = &lwp->lwp_pcb;

			siginfo.si_signo = 0;
			fault = 0;
			if (pcb->pcb_step == STEP_WASACTIVE) {
				pcb->pcb_step = STEP_NONE;
				pcb->pcb_tracepc = NULL;
				oldpc = rp->r_pc - 4;
			}
			/*
			 * If both NORMAL_STEP and WATCH_STEP are in
			 * effect, give precedence to WATCH_STEP.
			 * One or the other must be set at this point.
			 */
			ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
			if ((fault = undo_watch_step(&siginfo)) == 0 &&
			    (pcb->pcb_flags & NORMAL_STEP)) {
				siginfo.si_signo = SIGTRAP;
				siginfo.si_code = TRAP_TRACE;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				fault = FLTTRACE;
			}
			pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
		}
		break;

	case T_DATA_EXCEPTION + T_USER:	/* user data access exception */

		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		if (nfload(rp, &instr))
			goto out;
		if (IS_FLUSH(instr)) {
			/* skip the flush instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/
		}
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_addr = addr;
		switch (X_FAULT_TYPE(mmu_fsr)) {
		case FT_ATOMIC_NC:
			if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
			    (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
				/* skip the atomic */
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			/* fall into ... */
		case FT_PRIV:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_ACCERR;
			fault = FLTBOUNDS;
			break;
		case FT_SPEC_LD:
		case FT_ILL_ALT:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLADR;
			fault = FLTILL;
			break;
		default:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			fault = FLTBOUNDS;
			break;
		}
		break;

	case T_SYS_RTT_ALIGN + T_USER:	/* user alignment error */
	case T_ALIGNMENT + T_USER:	/* user alignment error */
		if (tudebug)
			showregs(type, rp, addr, 0);
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 */
		alignfaults++;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}

		bzero(&siginfo, sizeof (siginfo));
		if (type == T_SYS_RTT_ALIGN + T_USER) {
			if (nfload(rp, NULL))
				goto out;
			/*
			 * Can't do unaligned stack access
			 */
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = addr;
			fault = FLTACCESS;
			break;
		}

		/*
		 * Try to fix alignment before non-faulting load test.
		 */
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_PRIV_INSTR + T_USER:	/* privileged instruction fault */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGILL;
		siginfo.si_code = ILL_PRVOPC;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTILL;
		break;

	case T_UNIMP_INSTR:		/* priv illegal instruction fault */
		if (fpras_implemented) {
			/*
			 * Call fpras_chktrap indicating that
			 * we've come from a trap handler and pass
			 * the regs.  That function may choose to panic
			 * (in which case it won't return) or it may
			 * determine that a reboot is desired.  In the
			 * latter case it must alter pc/npc to skip
			 * the illegal instruction and continue at
			 * a controlled address.
			 */
			if (&fpras_chktrap) {
				if (fpras_chktrap(rp))
					goto cleanup;
			}
		}
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = *(int *)rp->r_pc;
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto cleanup;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		/*
		 * It's not an fpras failure and it's not SF_ERRATA_23 - die
		 */
		addr = (caddr_t)rp->r_pc;
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_UNIMP_INSTR + T_USER:	/* illegal instruction fault */
#if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
		instr = fetch_user_instr((caddr_t)rp->r_pc);
		if ((instr & 0xc0000000) == 0x40000000) {
			long pc;

			rp->r_o7 = (long long)rp->r_pc;
			pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
			rp->r_pc = rp->r_npc;
			rp->r_npc = pc;
			ill_calls++;
			goto out;
		}
#endif /* SF_ERRATA_23 || SF_ERRATA_30 */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		/*
		 * Try to simulate the instruction.
		 */
		switch (simulate_unimp(rp, &badaddr)) {
		case SIMU_RETRY:
			goto out;	/* regs are already set up */
			/*NOTREACHED*/

		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_DZERO:
			siginfo.si_signo = SIGFPE;
			siginfo.si_code = FPE_INTDIV;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTIZDIV;
			break;

		case SIMU_UNALIGN:
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			op3 = (instr >> 19) & 0x3F;
			if (IS_FLOAT(instr) && ((op3 == IOP_V8_STQFA) ||
			    (op3 == IOP_V8_STDFA)))
				siginfo.si_code = ILL_ILLADR;
			else
				siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD + T_USER:
	case T_UNIMP_STD + T_USER:
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		switch (simulate_lddstd(rp, &badaddr)) {
		case SIMU_SUCCESS:
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
			/*NOTREACHED*/

		case SIMU_FAULT:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
			break;

		case SIMU_UNALIGN:
			if (nfload(rp, NULL))
				goto out;
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			siginfo.si_addr = badaddr;
			fault = FLTACCESS;
			break;

		case SIMU_ILLEGAL:
		default:
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPC;
			siginfo.si_addr = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
		break;

	case T_UNIMP_LDD:
	case T_UNIMP_STD:
		if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
			/* skip the successfully simulated instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto cleanup;
			/*NOTREACHED*/
		}
		/*
		 * A third party driver executed an {LDD,STD,LDDA,STDA}
		 * that we couldn't simulate.
		 */
		if (nfload(rp, NULL))
			goto cleanup;

		if (curthread->t_lofault) {
			if (lodebug) {
				showregs(type, rp, addr, 0);
				traceback((caddr_t)rp->r_sp);
			}
			rp->r_g1 = EFAULT;
			rp->r_pc = curthread->t_lofault;
			rp->r_npc = rp->r_pc + 4;
			goto cleanup;
		}
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/

	case T_IDIV0 + T_USER:		/* integer divide by zero */
	case T_DIV0 + T_USER:		/* integer divide by zero */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code = FPE_INTDIV;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTIZDIV;
		break;

	case T_INT_OVERFLOW + T_USER:	/* integer overflow */
		if (tudebug && tudebugfpe)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGFPE;
		siginfo.si_code  = FPE_INTOVF;
		siginfo.si_addr  = (caddr_t)rp->r_pc;
		fault = FLTIOVF;
		break;

	case T_BREAKPOINT + T_USER:	/* breakpoint trap (t 1) */
		if (tudebug && tudebugbpt)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_BRKPT;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTBPT;
		break;

	case T_TAG_OVERFLOW + T_USER:	/* tag overflow (taddcctv, tsubcctv) */
		if (tudebug)
			showregs(type, rp, (caddr_t)0, 0);
		bzero(&siginfo, sizeof (siginfo));
		siginfo.si_signo = SIGEMT;
		siginfo.si_code = EMT_TAGOVF;
		siginfo.si_addr = (caddr_t)rp->r_pc;
		fault = FLTACCESS;
		break;

	case T_FLUSH_PCB + T_USER:	/* finish user window overflow */
	case T_FLUSHW + T_USER:		/* finish user window flush */
		/*
		 * This trap is entered from sys_rtt in locore.s when,
		 * upon return to user, it is found that there are user
		 * windows in pcb_wbuf.  This happens because they could
		 * not be saved on the user stack, either because the
		 * stack wasn't resident or because it was misaligned.
		 */
	{
		int error;
		caddr_t sp;

		error = flush_user_windows_to_stack(&sp);
		/*
		 * Possible errors:
		 *	error copying out
		 *	unaligned stack pointer
		 * The first is given to us as the return value
		 * from flush_user_windows_to_stack().  The second
		 * results in residual windows in the pcb.
		 */
		if (error != 0) {
			/*
			 * EINTR comes from a signal during copyout;
			 * we should not post another signal.
			 */
			if (error != EINTR) {
				/*
				 * Zap the process with a SIGSEGV - process
				 * may be managing its own stack growth by
				 * taking SIGSEGVs on a different signal stack.
				 */
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGSEGV;
				siginfo.si_code  = SEGV_MAPERR;
				siginfo.si_addr  = sp;
				fault = FLTBOUNDS;
			}
			break;
		} else if (mpcb->mpcb_wbcnt) {
			bzero(&siginfo, sizeof (siginfo));
			siginfo.si_signo = SIGILL;
			siginfo.si_code  = ILL_BADSTK;
			siginfo.si_addr  = (caddr_t)rp->r_pc;
			fault = FLTILL;
			break;
		}
	}

		/*
		 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
		 * window trap -- which is implemented by executing the
		 * flushw instruction. The flushw can trap if any of the
		 * stack pages are not writable for whatever reason. In this
		 * case only, we advance the pc to the next instruction so
		 * that the user thread doesn't needlessly execute the trap
		 * again. Normally this wouldn't be a problem -- we'll
		 * usually only end up here if this is the first touch to a
		 * stack page -- since the second execution won't trap, but
		 * if there's a watchpoint on the stack page the user thread
		 * would spin, continuously executing the trap instruction.
		 */
		if (type == T_FLUSHW + T_USER) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		}
		goto out;

	case T_AST + T_USER:		/* profiling or resched pseudo trap */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
			lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
			if (kcpc_overflow_ast()) {
				/*
				 * Signal performance counter overflow
				 */
				if (tudebug)
					showregs(type, rp, (caddr_t)0, 0);
				bzero(&siginfo, sizeof (siginfo));
				siginfo.si_signo = SIGEMT;
				siginfo.si_code = EMT_CPCOVF;
				siginfo.si_addr = (caddr_t)rp->r_pc;
				/* for trap_cleanup(), below */
				oldpc = rp->r_pc - 4;
				fault = FLTCPCOVF;
			}
		}

		/*
		 * The CPC_OVERFLOW check above may already have populated
		 * siginfo and set fault, so the checks below must not
		 * touch these and the functions they call must use
		 * trapsig() directly.
		 */

		if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
			trap_async_hwerr();
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
			trap_async_berr_bto(ASYNC_BERR, rp);
		}

		if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
			lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
			trap_async_berr_bto(ASYNC_BTO, rp);
		}

		break;
	}

	if (fault) {
		/* We took a fault so abort single step. */
		lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
	}
	trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);

out:	/* We can't get here from a system trap */
	ASSERT(type & T_USER);
	trap_rtt();
	(void) new_mstate(curthread, mstate);
	/* Kernel probe */
	TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
		tnf_microstate, state, LMS_USER);

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
	return;

cleanup:	/* system traps end up here */
	ASSERT(!(type & T_USER));

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

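/*
 * Common post-trap processing for trap() and fpu_trap(): record the
 * fault for /proc and profiling, post any pending signal, and handle
 * AST work (rebinding, async I/O cleanup, holds and signals).
 */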
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * For kaio requests on the per-process poll queue,
		 * aiop->aio_pollq, whose AIO_POLL bit is set, the kernel
		 * should copy out their result_t to user memory.  By copying
		 * out the result_t, the user can poll on memory, waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0);
			curthread->t_sig_check = 1;
		}
	}
}

/*
 * Called from fp_traps when a floating point trap occurs.
 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
 * because mmu_fsr (now changed to code) is always 0.
 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
 * because the simulator only simulates multiply and divide instructions,
 * which would not cause floating point traps in the first place.
 * XXX - Supervisor mode floating point traps?
 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 *	came here directly from _fp_ieee_exception,
		 *	which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28. Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER: /* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER: /* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:		/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}

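/*
 * Common end-of-trap work performed before returning to user mode.
 */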
void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}

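/*
 * Decode helpers for nfload() below.  IS_ASINF matches the four
 * non-faulting ASIs (primary/secondary, little-endian or not), all of
 * which collapse to 0x82 under the 0xF6 mask.
 */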
#define	IS_LDASI(o)	\
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 ||	\
	(o) == (uint32_t)0xC1800000)
#define	IS_IMM_ASI(i)	(((i) & 0x2000) == 0)
#define	IS_ASINF(a)	(((a) & 0xF6) == 0x82)
#define	IS_LDDA(i)	(((i) & 0xC1F80000) == 0xC0980000)

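/*
 * Handle a fault taken by a non-faulting (ASI_*NF*) load: if possible,
 * map a non-fault segment (segnf) over the offending address, make the
 * destination register(s) read as zero, and step past the instruction.
 * Returns 1 if the trap was consumed here, 0 if normal fault handling
 * should proceed.
 */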
static int
nfload(struct regs *rp, int *instrp)
{
	uint_t	instr, asi, op3, rd;
	size_t	len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}

kmutex_t atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
static int
swap_nc(struct regs *rp, int instr)
{
	uint64_t rdata, mdata;
	caddr_t addr, badaddr;
	uint_t tmp, rd;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	if (getreg(rp, rd, &rdata, &badaddr))
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword32(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword32(addr, (uint32_t)rdata) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

static int
ldstub_nc(struct regs *rp, int instr)
{
	uint64_t mdata;
	caddr_t addr, badaddr;
	uint_t rd;
	uint8_t tmp;

	(void) flush_user_windows_to_stack(NULL);
	rd = (instr >> 25) & 0x1f;
	if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
		return (0);
	mutex_enter(&atomic_nc_mutex);
	if (fuword8(addr, &tmp) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	mdata = (u_longlong_t)tmp;
	if (suword8(addr, (uint8_t)0xff) == -1) {
		mutex_exit(&atomic_nc_mutex);
		return (0);
	}
	(void) putreg(&mdata, rp, rd, &badaddr);
	mutex_exit(&atomic_nc_mutex);
	return (1);
}

/*
 * This function helps instr_size() determine the operand size.
 * It is called for the extended ldda/stda asi's.
 */
int
extended_asi_size(int asi)
{
	switch (asi) {
	case ASI_PST8_P:
	case ASI_PST8_S:
	case ASI_PST16_P:
	case ASI_PST16_S:
	case ASI_PST32_P:
	case ASI_PST32_S:
	case ASI_PST8_PL:
	case ASI_PST8_SL:
	case ASI_PST16_PL:
	case ASI_PST16_SL:
	case ASI_PST32_PL:
	case ASI_PST32_SL:
		return (8);
	case ASI_FL8_P:
	case ASI_FL8_S:
	case ASI_FL8_PL:
	case ASI_FL8_SL:
		return (1);
	case ASI_FL16_P:
	case ASI_FL16_S:
	case ASI_FL16_PL:
	case ASI_FL16_SL:
		return (2);
	case ASI_BLK_P:
	case ASI_BLK_S:
	case ASI_BLK_PL:
	case ASI_BLK_SL:
	case ASI_BLK_COMMIT_P:
	case ASI_BLK_COMMIT_S:
		return (64);
	}

	return (0);
}

/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

struct kpreempt_cnts {	/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
}	kpreempt_cnts;

/*
 * kernel preemption: forced rescheduling
 *	preempt the running kernel thread.
 */
void
kpreempt(int asyncspl)
{
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * Either this is a privileged thread (idle, panic,
			 * interrupt) or it will check for preemption when
			 * t_preempt is lowered.
			 * We need to specifically handle the case where
			 * the thread is in the middle of swtch (resume has
			 * been called) and has its t_preempt set
			 * [idle thread and a thread which is in kpreempt
			 * already] and then a high priority thread is
			 * available in the local dispatch queue.
			 * In this case the resumed thread needs to take a
			 * trap so that it can call kpreempt. We achieve
			 * this by using siron().
			 * How do we detect this condition:
			 * idle thread is running and is in the midst of
			 * resume: curthread->t_pri == -1 &&
			 * CPU->cpu_dispthread != CPU->cpu_thread.
			 * We need to ensure that this happens only at high
			 * pil: resume is called at high pil, and only
			 * resume_from_idle changes the pil.
			 */
			if (curthread->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}

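/*
 * Decode the instruction at the trapped PC to determine whether the
 * faulting access was a read, a write, or a flush; the hardware does
 * not record this for us on an MMU miss.
 */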
static enum seg_rw
get_accesstype(struct regs *rp)
{
	uint32_t instr;

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(uint32_t *)rp->r_pc;

	if (IS_FLUSH(instr))
		return (S_OTHER);

	if (IS_STORE(instr))
		return (S_WRITE);
	else
		return (S_READ);
}
1889