xref: /illumos-gate/usr/src/uts/common/vm/seg_kmem.c (revision 8fd04b8338ed5093ec2d1e668fa620b7de44c177)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/t_lock.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/tuneable.h>
31 #include <sys/systm.h>
32 #include <sys/vm.h>
33 #include <sys/kmem.h>
34 #include <sys/vmem.h>
35 #include <sys/mman.h>
36 #include <sys/cmn_err.h>
37 #include <sys/debug.h>
38 #include <sys/dumphdr.h>
39 #include <sys/bootconf.h>
40 #include <sys/lgrp.h>
41 #include <vm/seg_kmem.h>
42 #include <vm/hat.h>
43 #include <vm/page.h>
44 #include <vm/vm_dep.h>
45 #include <vm/faultcode.h>
46 #include <sys/promif.h>
47 #include <vm/seg_kp.h>
48 #include <sys/bitmap.h>
49 #include <sys/mem_cage.h>
50 
51 #ifdef __sparc
52 #include <sys/ivintr.h>
53 #include <sys/panic.h>
54 #endif
55 
56 /*
57  * seg_kmem is the primary kernel memory segment driver.  It
58  * maps the kernel heap [kernelheap, ekernelheap), module text,
59  * and all memory which was allocated before the VM was initialized
60  * into kas.
61  *
62  * Pages which belong to seg_kmem are hashed into &kvp vnode at
63  * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
64  * They must never be paged out since segkmem_fault() is a no-op to
65  * prevent recursive faults.
66  *
67  * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
68  * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
69  * supports relocation, the #ifdef kludges can be removed.
70  *
71  * seg_kmem pages may be subject to relocation by page_relocate(),
72  * provided that the HAT supports it; if this is so, segkmem_reloc
73  * will be set to a nonzero value. All boot time allocated memory as
74  * well as static memory is considered off limits to relocation.
75  * Pages are "relocatable" if p_state does not have P_NORELOC set, so
76  * we request P_NORELOC pages for memory that isn't safe to relocate.
77  *
78  * The kernel heap is logically divided up into four pieces:
79  *
80  *   heap32_arena is for allocations that require 32-bit absolute
81  *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
82  *
83  *   heap_core is for allocations that require 2GB *relative*
84  *   offsets; in other words all memory from heap_core is within
85  *   2GB of all other memory from the same arena. This is a requirement
86  *   of the addressing modes of some processors in supervisor code.
87  *
88  *   heap_arena is the general heap arena.
89  *
90  *   static_arena is the static memory arena.  Allocations from it
91  *   are not subject to relocation, so it is safe to use the memory's
92  *   physical address as well as its virtual address (i.e. the VA to
93  *   PA translations are static).  Caches may import from static_arena;
94  *   all other static memory allocations should use static_alloc_arena.
95  *
96  * On some platforms which have limited virtual address space, seg_kmem
97  * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
98  * segkp_bitmap is non-NULL, and each bit represents a page of virtual
99  * address space which is actually seg_kp mapped.
100  */
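
/*
 * A minimal illustrative sketch (assuming only the invariants above):
 * because every seg_kmem page is hashed into &kvp at an offset equal to
 * its virtual address, the page_t backing a kernel VA can be found with
 *
 *	pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va, SE_SHARED);
 *
 * which is exactly the lookup that segkmem_fault() and segkmem_pagelock()
 * below perform.
 */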
101 
102 extern ulong_t *segkp_bitmap;   /* Is set if segkp is from the kernel heap */
103 
104 char *kernelheap;		/* start of primary kernel heap */
105 char *ekernelheap;		/* end of primary kernel heap */
106 struct seg kvseg;		/* primary kernel heap segment */
107 struct seg kvseg_core;		/* "core" kernel heap segment */
108 struct seg kzioseg;		/* Segment for zio mappings */
109 vmem_t *heap_arena;		/* primary kernel heap arena */
110 vmem_t *heap_core_arena;	/* core kernel heap arena */
111 char *heap_core_base;		/* start of core kernel heap arena */
112 char *heap_lp_base;		/* start of kernel large page heap arena */
113 char *heap_lp_end;		/* end of kernel large page heap arena */
114 vmem_t *hat_memload_arena;	/* HAT translation data */
115 struct seg kvseg32;		/* 32-bit kernel heap segment */
116 vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
117 vmem_t *heaptext_arena;		/* heaptext arena */
118 struct as kas;			/* kernel address space */
119 int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
120 vmem_t *static_arena;		/* arena for caches to import static memory */
121 vmem_t *static_alloc_arena;	/* arena for allocating static memory */
122 vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
123 vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */
124 
125 /*
126  * seg_kmem driver can map part of the kernel heap with large pages.
127  * Currently this functionality is implemented for sparc platforms only.
128  *
129  * The large page size "segkmem_lpsize" for kernel heap is selected in the
130  * platform-specific code. It can also be modified via the /etc/system file.
131  * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
132  * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
133  * match segkmem_lpsize.
134  *
135  * At boot time we carve a range of virtual addresses from the kernel heap
136  * arena for use by large page mappings. This range, [heap_lp_base,
137  * heap_lp_end), is set up as a separate vmem arena - "heap_lp_arena". We also
138  * create "kmem_lp_arena", which caches memory already backed by large
139  * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
140  */
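
/*
 * For example (an illustrative /etc/system line; the value 0x2000 assumes
 * an 8K base PAGESIZE, as on sparc), the following disables large pages
 * for the kernel heap per the rule above:
 *
 *	set segkmem_lpsize = 0x2000
 */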
141 
142 size_t	segkmem_lpsize;
143 static  uint_t	segkmem_lpshift = PAGESHIFT;
144 int	segkmem_lpszc = 0;
145 
146 size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
147 size_t  segkmem_heaplp_quantum;
148 vmem_t *heap_lp_arena;
149 static  vmem_t *kmem_lp_arena;
150 static  vmem_t *segkmem_ppa_arena;
151 static	segkmem_lpcb_t segkmem_lpcb;
152 
153 /*
154  * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
155  * consumed by the large page heap. By default this parameter is set to 1/8 of
156  * physmem, but it can be adjusted through /etc/system either directly or
157  * indirectly, by setting "segkmem_kmemlp_pcnt" to the percentage of physmem
158  * we allow for the large page heap.
159  */
160 size_t  segkmem_kmemlp_max;
161 static  uint_t  segkmem_kmemlp_pcnt;
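
/*
 * For example (illustrative values only), either of the following
 * /etc/system lines would cap the large page heap at 1GB on a machine
 * with 4GB of physical memory:
 *
 *	set segkmem_kmemlp_pcnt = 25
 *	set segkmem_kmemlp_max = 0x40000000
 */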
162 
163 /*
164  * Getting large pages for the kernel heap can be problematic due to
165  * physical memory fragmentation. That's why we allow "segkmem_kmemlp_min"
166  * bytes to be preallocated at boot time.
167  */
168 static  size_t	segkmem_kmemlp_min;
169 
170 /*
171  * Throttling is used to avoid expensive tries to allocate large pages
172  * for kernel heap when a lot of succesive attempts to do so fail.
173  */
174 static  ulong_t segkmem_lpthrottle_max = 0x400000;
175 static  ulong_t segkmem_lpthrottle_start = 0x40;
176 static  ulong_t segkmem_use_lpthrottle = 1;
177 
178 /*
179  * Freed pages accumulate on a garbage list until segkmem is ready,
180  * at which point we call segkmem_gc() to free them all.
181  */
182 typedef struct segkmem_gc_list {
183 	struct segkmem_gc_list	*gc_next;
184 	vmem_t			*gc_arena;
185 	size_t			gc_size;
186 } segkmem_gc_list_t;
187 
188 static segkmem_gc_list_t *segkmem_gc_list;
189 
190 /*
191  * Allocations from the hat_memload arena add VM_MEMLOAD to their
192  * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
193  * to take steps to prevent infinite recursion.  HAT allocations also
194  * must be non-relocatable to prevent recursive page faults.
195  */
196 static void *
197 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
198 {
199 	flags |= (VM_MEMLOAD | VM_NORELOC);
200 	return (segkmem_alloc(vmp, size, flags));
201 }
202 
203 /*
204  * Allocations from static_arena arena (or any other arena that uses
205  * segkmem_alloc_permanent()) require non-relocatable (permanently
206  * wired) memory pages, since these pages are referenced by physical
207  * as well as virtual address.
208  */
209 void *
210 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
211 {
212 	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
213 }
214 
215 /*
216  * Initialize kernel heap boundaries.
217  */
218 void
219 kernelheap_init(
220 	void *heap_start,
221 	void *heap_end,
222 	char *first_avail,
223 	void *core_start,
224 	void *core_end)
225 {
226 	uintptr_t textbase;
227 	size_t core_size;
228 	size_t heap_size;
229 	vmem_t *heaptext_parent;
230 	size_t	heap_lp_size = 0;
231 #ifdef __sparc
232 	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
233 #endif	/* __sparc */
234 
235 	kernelheap = heap_start;
236 	ekernelheap = heap_end;
237 
238 #ifdef __sparc
239 	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
240 	/*
241 	 * Bias the heap_lp start address by kmem64_sz to reduce collisions
242 	 * in the 4M kernel TSB between the kmem64 area and heap_lp.
243 	 */
244 	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
245 	if (kmem64_sz <= heap_lp_size / 2)
246 		heap_lp_size -= kmem64_sz;
247 	heap_lp_base = ekernelheap - heap_lp_size;
248 	heap_lp_end = heap_lp_base + heap_lp_size;
249 #endif	/* __sparc */
250 
251 	/*
252 	 * If this platform has a 'core' heap area, then the space for
253 	 * overflow module text should be carved out of the end of that
254 	 * heap.  Otherwise, it gets carved out of the general purpose
255 	 * heap.
256 	 */
257 	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
258 	if (core_size > 0) {
259 		ASSERT(core_size >= HEAPTEXT_SIZE);
260 		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
261 		core_size -= HEAPTEXT_SIZE;
262 	}
263 #ifndef __sparc
264 	else {
265 		ekernelheap -= HEAPTEXT_SIZE;
266 		textbase = (uintptr_t)ekernelheap;
267 	}
268 #endif
269 
270 	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
271 	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
272 	    segkmem_alloc, segkmem_free);
273 
274 	if (core_size > 0) {
275 		heap_core_arena = vmem_create("heap_core", core_start,
276 		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
277 		heap_core_base = core_start;
278 	} else {
279 		heap_core_arena = heap_arena;
280 		heap_core_base = kernelheap;
281 	}
282 
283 	/*
284 	 * Reserve space for the large page heap. If large pages for the
285 	 * kernel heap are enabled, the large page heap arena will be created
286 	 * later in the boot sequence in segkmem_heap_lp_init(). Otherwise the
287 	 * allocated range will be returned to the heap_arena.
288 	 */
289 	if (heap_lp_size) {
290 		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
291 		    heap_lp_base, heap_lp_end,
292 		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
293 	}
294 
295 	/*
296 	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
297 	 */
298 	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
299 	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
300 
301 #ifdef __sparc
302 	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
303 	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
304 	    NULL, NULL, 0, VM_SLEEP);
305 	/*
306 	 * The prom claims the physical and virtual resources used by panicbuf
307 	 * and intr_vec_table. So reserve space for panicbuf, intr_vec_table,
308 	 * and the reserved interrupt vector data structures from the 32-bit heap.
309 	 */
310 	(void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
311 	    panicbuf, panicbuf + PANICBUFSIZE,
312 	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
313 
314 	(void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
315 	    intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
316 	    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
317 
318 	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
319 	heaptext_parent = NULL;
320 #else	/* __sparc */
321 	heap32_arena = heap_core_arena;
322 	heaptext_parent = heap_core_arena;
323 #endif	/* __sparc */
324 
325 	heaptext_arena = vmem_create("heaptext", (void *)textbase,
326 	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
327 
328 	/*
329 	 * Create a set of arenas for memory with static translations
330 	 * (i.e. VA -> PA translations cannot change).  Since using
331 	 * kernel pages by physical address implies it isn't safe to
332 	 * walk across page boundaries, the static_arena quantum must
333 	 * be PAGESIZE.  Any kmem caches that require static memory
334 	 * should source from static_arena, while direct allocations
335 	 * should only use static_alloc_arena.
336 	 */
337 	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
338 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
339 	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
340 	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
341 	    0, VM_SLEEP);
342 
343 	/*
344 	 * Create an arena for translation data (ptes, hmes, or hblks).
345 	 * We need an arena for this because hat_memload() is essential
346 	 * to vmem_populate() (see comments in common/os/vmem.c).
347 	 *
348 	 * Note: any kmem cache that allocates from hat_memload_arena
349 	 * must be created as a KMC_NOHASH cache (i.e. no external slab
350 	 * and bufctl structures to allocate) so that slab creation doesn't
351 	 * require anything more than a single vmem_alloc().
352 	 */
353 	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
354 	    hat_memload_alloc, segkmem_free, heap_arena, 0,
355 	    VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
356 }
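
/*
 * To summarize, an informal sketch of the arena import relationships set
 * up above (an arrow points from an importing arena to its source):
 *
 *	static_alloc_arena -> static_arena -> heap_arena
 *	hat_memload_arena  -> heap_arena
 *	heaptext_arena     -> heap_core_arena (non-sparc; standalone on sparc)
 *	heap_core_arena    =  heap_arena when there is no separate core heap
 */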
357 
358 void
359 boot_mapin(caddr_t addr, size_t size)
360 {
361 	caddr_t	 eaddr;
362 	page_t	*pp;
363 	pfn_t	 pfnum;
364 
365 	if (page_resv(btop(size), KM_NOSLEEP) == 0)
366 		panic("boot_mapin: page_resv failed");
367 
368 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
369 		pfnum = va_to_pfn(addr);
370 		if (pfnum == PFN_INVALID)
371 			continue;
372 		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
373 			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
374 
375 		/*
376 		 * We must break up any large pages that may have constituent
377 		 * pages being utilized for BOP_ALLOC()s before calling
378 		 * page_numtopp(). The locking code (i.e. page_reclaim())
379 		 * can't handle them.
380 		 */
381 		if (pp->p_szc != 0)
382 			page_boot_demote(pp);
383 
384 		pp = page_numtopp(pfnum, SE_EXCL);
385 		if (pp == NULL || PP_ISFREE(pp))
386 			panic("boot_alloc: pp is NULL or free");
387 
388 		/*
389 		 * If the cage is on but doesn't yet contain this page,
390 		 * mark it as non-relocatable.
391 		 */
392 		if (kcage_on && !PP_ISNORELOC(pp)) {
393 			PP_SETNORELOC(pp);
394 			PLCNT_XFER_NORELOC(pp);
395 		}
396 
397 		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
398 		pp->p_lckcnt = 1;
399 #if defined(__x86)
400 		page_downgrade(pp);
401 #else
402 		page_unlock(pp);
403 #endif
404 	}
405 }
406 
407 /*
408  * Get pages from boot and hash them into the kernel's vp.
409  * Used after page structs have been allocated, but before segkmem is ready.
410  */
411 void *
412 boot_alloc(void *inaddr, size_t size, uint_t align)
413 {
414 	caddr_t addr = inaddr;
415 
416 	if (bootops == NULL)
417 		prom_panic("boot_alloc: attempt to allocate memory after "
418 		    "BOP_GONE");
419 
420 	size = ptob(btopr(size));
421 #ifdef __sparc
422 	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
423 		panic("boot_alloc: bop_alloc_chunk failed");
424 #else
425 	if (BOP_ALLOC(bootops, addr, size, align) != addr)
426 		panic("boot_alloc: BOP_ALLOC failed");
427 #endif
428 	boot_mapin((caddr_t)addr, size);
429 	return (addr);
430 }
431 
432 static void
433 segkmem_badop()
434 {
435 	panic("segkmem_badop");
436 }
437 
438 #define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
439 
440 /*ARGSUSED*/
441 static faultcode_t
442 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
443 	enum fault_type type, enum seg_rw rw)
444 {
445 	pgcnt_t npages;
446 	spgcnt_t pg;
447 	page_t *pp;
448 	struct vnode *vp = seg->s_data;
449 
450 	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
451 
452 	if (seg->s_as != &kas || size > seg->s_size ||
453 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
454 		panic("segkmem_fault: bad args");
455 
456 	/*
457 	 * If this is one of the segkp pages, call segkp_fault.
458 	 */
459 	if (segkp_bitmap && seg == &kvseg &&
460 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
461 		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
462 
463 	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
464 		return (FC_NOSUPPORT);
465 
466 	npages = btopr(size);
467 
468 	switch (type) {
469 	case F_SOFTLOCK:	/* lock down already-loaded translations */
470 		for (pg = 0; pg < npages; pg++) {
471 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
472 			    SE_SHARED);
473 			if (pp == NULL) {
474 				/*
475 				 * Hmm, no page. Does a kernel mapping
476 				 * exist for it?
477 				 */
478 				if (!hat_probe(kas.a_hat, addr)) {
479 					addr -= PAGESIZE;
480 					while (--pg >= 0) {
481 						pp = page_find(vp, (u_offset_t)
482 						    (uintptr_t)addr);
483 						if (pp)
484 							page_unlock(pp);
485 						addr -= PAGESIZE;
486 					}
487 					return (FC_NOMAP);
488 				}
489 			}
490 			addr += PAGESIZE;
491 		}
492 		if (rw == S_OTHER)
493 			hat_reserve(seg->s_as, addr, size);
494 		return (0);
495 	case F_SOFTUNLOCK:
496 		while (npages--) {
497 			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
498 			if (pp)
499 				page_unlock(pp);
500 			addr += PAGESIZE;
501 		}
502 		return (0);
503 	default:
504 		return (FC_NOSUPPORT);
505 	}
506 	/*NOTREACHED*/
507 }
508 
509 static int
510 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
511 {
512 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
513 
514 	if (seg->s_as != &kas || size > seg->s_size ||
515 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
516 		panic("segkmem_setprot: bad args");
517 
518 	/*
519 	 * If this is one of the segkp pages, call segkp.
520 	 */
521 	if (segkp_bitmap && seg == &kvseg &&
522 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
523 		return (SEGOP_SETPROT(segkp, addr, size, prot));
524 
525 	if (prot == 0)
526 		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
527 	else
528 		hat_chgprot(kas.a_hat, addr, size, prot);
529 	return (0);
530 }
531 
532 /*
533  * This is a dummy segkmem function overloaded to call segkp
534  * when segkp is under the heap.
535  */
536 /* ARGSUSED */
537 static int
538 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
539 {
540 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
541 
542 	if (seg->s_as != &kas)
543 		segkmem_badop();
544 
545 	/*
546 	 * If this is one of the segkp pages, call into segkp.
547 	 */
548 	if (segkp_bitmap && seg == &kvseg &&
549 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
550 		return (SEGOP_CHECKPROT(segkp, addr, size, prot));
551 
552 	segkmem_badop();
553 	return (0);
554 }
555 
556 /*
557  * This is a dummy segkmem function overloaded to call segkp
558  * when segkp is under the heap.
559  */
560 /* ARGSUSED */
561 static int
562 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
563 {
564 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
565 
566 	if (seg->s_as != &kas)
567 		segkmem_badop();
568 
569 	/*
570 	 * If this is one of the segkp pages, call into segkp.
571 	 */
572 	if (segkp_bitmap && seg == &kvseg &&
573 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
574 		return (SEGOP_KLUSTER(segkp, addr, delta));
575 
576 	segkmem_badop();
577 	return (0);
578 }
579 
580 static void
581 segkmem_xdump_range(void *arg, void *start, size_t size)
582 {
583 	struct as *as = arg;
584 	caddr_t addr = start;
585 	caddr_t addr_end = addr + size;
586 
587 	while (addr < addr_end) {
588 		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
589 		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
590 			dump_addpage(as, addr, pfn);
591 		addr += PAGESIZE;
592 		dump_timeleft = dump_timeout;
593 	}
594 }
595 
596 static void
597 segkmem_dump_range(void *arg, void *start, size_t size)
598 {
599 	caddr_t addr = start;
600 	caddr_t addr_end = addr + size;
601 
602 	/*
603 	 * If we are about to start dumping the range of addresses we
604 	 * carved out of the kernel heap for the large page heap, walk
605 	 * heap_lp_arena to find which segments are actually populated.
606 	 */
607 	if (SEGKMEM_USE_LARGEPAGES &&
608 	    addr == heap_lp_base && addr_end == heap_lp_end &&
609 	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
610 		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
611 		    segkmem_xdump_range, arg);
612 	} else {
613 		segkmem_xdump_range(arg, start, size);
614 	}
615 }
616 
617 static void
618 segkmem_dump(struct seg *seg)
619 {
620 	/*
621 	 * The kernel's heap_arena (represented by kvseg) is a very large
622 	 * VA space, most of which is typically unused.  To speed up dumping
623 	 * we use vmem_walk() to quickly find the pieces of heap_arena that
624 	 * are actually in use.  We do the same for heap32_arena and
625 	 * heap_core.
626 	 *
627 	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
628 	 * may ultimately need to allocate memory.  Reentrant walks are
629 	 * necessarily imperfect snapshots.  The kernel heap continues
630 	 * to change during a live crash dump, for example.  For a normal
631 	 * crash dump, however, we know that there won't be any other threads
632 	 * messing with the heap.  Therefore, at worst, we may fail to dump
633 	 * the pages that get allocated by the act of dumping; but we will
634 	 * always dump every page that was allocated when the walk began.
635 	 *
636 	 * The other segkmem segments are dense (fully populated), so there's
637 	 * no need to use this technique when dumping them.
638 	 *
639 	 * Note: when adding special dump handling for any new sparsely-
640 	 * populated segments, be sure to add similar handling to the ::kgrep
641 	 * code in mdb.
642 	 */
643 	if (seg == &kvseg) {
644 		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
645 		    segkmem_dump_range, seg->s_as);
646 #ifndef __sparc
647 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
648 		    segkmem_dump_range, seg->s_as);
649 #endif
650 	} else if (seg == &kvseg_core) {
651 		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
652 		    segkmem_dump_range, seg->s_as);
653 	} else if (seg == &kvseg32) {
654 		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
655 		    segkmem_dump_range, seg->s_as);
656 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
657 		    segkmem_dump_range, seg->s_as);
658 	} else if (seg == &kzioseg) {
659 		/*
660 		 * We don't want to dump pages attached to kzioseg since they
661 		 * contain file data from ZFS.  If this segment is kzioseg,
662 		 * return instead of writing its pages to the dump device.
663 		 */
664 		return;
665 	} else {
666 		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
667 	}
668 }
669 
670 /*
671  * Lock/unlock kmem pages over the given range [addr, addr + len).
672  * Returns a shadow list of pages in ppp. If there are holes
673  * in the range (e.g. some of the kernel mappings do not have
674  * underlying page_ts), returns ENOTSUP so that as_pagelock()
675  * will handle the range via as_fault(F_SOFTLOCK).
676  */
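/*
 * An illustrative caller-side sketch (hypothetical; not from this file):
 *
 *	page_t **pplist;
 *
 *	if (as_pagelock(&kas, &pplist, addr, len, S_WRITE) == 0) {
 *		... use the pages; pplist may be NULL if the lock was
 *		... satisfied via as_fault(F_SOFTLOCK) instead
 *		as_pageunlock(&kas, pplist, addr, len, S_WRITE);
 *	}
 */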
677 /*ARGSUSED*/
678 static int
679 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
680 	page_t ***ppp, enum lock_type type, enum seg_rw rw)
681 {
682 	page_t **pplist, *pp;
683 	pgcnt_t npages;
684 	spgcnt_t pg;
685 	size_t nb;
686 	struct vnode *vp = seg->s_data;
687 
688 	ASSERT(ppp != NULL);
689 
690 	/*
691 	 * If this is one of the segkp pages, call into segkp.
692 	 */
693 	if (segkp_bitmap && seg == &kvseg &&
694 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
695 		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
696 
697 	npages = btopr(len);
698 	nb = sizeof (page_t *) * npages;
699 
700 	if (type == L_PAGEUNLOCK) {
701 		pplist = *ppp;
702 		ASSERT(pplist != NULL);
703 
704 		for (pg = 0; pg < npages; pg++) {
705 			pp = pplist[pg];
706 			page_unlock(pp);
707 		}
708 		kmem_free(pplist, nb);
709 		return (0);
710 	}
711 
712 	ASSERT(type == L_PAGELOCK);
713 
714 	pplist = kmem_alloc(nb, KM_NOSLEEP);
715 	if (pplist == NULL) {
716 		*ppp = NULL;
717 		return (ENOTSUP);	/* take the slow path */
718 	}
719 
720 	for (pg = 0; pg < npages; pg++) {
721 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
722 		if (pp == NULL) {
723 			while (--pg >= 0)
724 				page_unlock(pplist[pg]);
725 			kmem_free(pplist, nb);
726 			*ppp = NULL;
727 			return (ENOTSUP);
728 		}
729 		pplist[pg] = pp;
730 		addr += PAGESIZE;
731 	}
732 
733 	*ppp = pplist;
734 	return (0);
735 }
736 
737 /*
738  * This is a dummy segkmem function overloaded to call segkp
739  * when segkp is under the heap.
740  */
741 /* ARGSUSED */
742 static int
743 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
744 {
745 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
746 
747 	if (seg->s_as != &kas)
748 		segkmem_badop();
749 
750 	/*
751 	 * If this is one of the segkp pages, call into segkp.
752 	 */
753 	if (segkp_bitmap && seg == &kvseg &&
754 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
755 		return (SEGOP_GETMEMID(segkp, addr, memidp));
756 
757 	segkmem_badop();
758 	return (0);
759 }
760 
761 /*ARGSUSED*/
762 static lgrp_mem_policy_info_t *
763 segkmem_getpolicy(struct seg *seg, caddr_t addr)
764 {
765 	return (NULL);
766 }
767 
768 /*ARGSUSED*/
769 static int
770 segkmem_capable(struct seg *seg, segcapability_t capability)
771 {
772 	if (capability == S_CAPABILITY_NOMINFLT)
773 		return (1);
774 	return (0);
775 }
776 
777 static struct seg_ops segkmem_ops = {
778 	SEGKMEM_BADOP(int),		/* dup */
779 	SEGKMEM_BADOP(int),		/* unmap */
780 	SEGKMEM_BADOP(void),		/* free */
781 	segkmem_fault,
782 	SEGKMEM_BADOP(faultcode_t),	/* faulta */
783 	segkmem_setprot,
784 	segkmem_checkprot,
785 	segkmem_kluster,
786 	SEGKMEM_BADOP(size_t),		/* swapout */
787 	SEGKMEM_BADOP(int),		/* sync */
788 	SEGKMEM_BADOP(size_t),		/* incore */
789 	SEGKMEM_BADOP(int),		/* lockop */
790 	SEGKMEM_BADOP(int),		/* getprot */
791 	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
792 	SEGKMEM_BADOP(int),		/* gettype */
793 	SEGKMEM_BADOP(int),		/* getvp */
794 	SEGKMEM_BADOP(int),		/* advise */
795 	segkmem_dump,
796 	segkmem_pagelock,
797 	SEGKMEM_BADOP(int),		/* setpgsz */
798 	segkmem_getmemid,
799 	segkmem_getpolicy,		/* getpolicy */
800 	segkmem_capable,		/* capable */
801 };
802 
803 int
804 segkmem_zio_create(struct seg *seg)
805 {
806 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
807 	seg->s_ops = &segkmem_ops;
808 	seg->s_data = &zvp;
809 	kas.a_size += seg->s_size;
810 	return (0);
811 }
812 
813 int
814 segkmem_create(struct seg *seg)
815 {
816 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
817 	seg->s_ops = &segkmem_ops;
818 	seg->s_data = &kvp;
819 	kas.a_size += seg->s_size;
820 	return (0);
821 }
822 
823 /*ARGSUSED*/
824 page_t *
825 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
826 {
827 	struct seg kseg;
828 	int pgflags;
829 	struct vnode *vp = arg;
830 
831 	if (vp == NULL)
832 		vp = &kvp;
833 
834 	kseg.s_as = &kas;
835 	pgflags = PG_EXCL;
836 
837 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
838 		pgflags |= PG_NORELOC;
839 	if ((vmflag & VM_NOSLEEP) == 0)
840 		pgflags |= PG_WAIT;
841 	if (vmflag & VM_PANIC)
842 		pgflags |= PG_PANIC;
843 	if (vmflag & VM_PUSHPAGE)
844 		pgflags |= PG_PUSHPAGE;
845 
846 	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
847 	    pgflags, &kseg, addr));
848 }
849 
850 /*
851  * Allocate pages to back the virtual address range [addr, addr + size).
852  * If addr is NULL, allocate the virtual address space as well.
853  */
854 void *
855 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
856 	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
857 {
858 	page_t *ppl;
859 	caddr_t addr = inaddr;
860 	pgcnt_t npages = btopr(size);
861 	int allocflag;
862 
863 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
864 		return (NULL);
865 
866 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
867 
868 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
869 		if (inaddr == NULL)
870 			vmem_free(vmp, addr, size);
871 		return (NULL);
872 	}
873 
874 	ppl = page_create_func(addr, size, vmflag, pcarg);
875 	if (ppl == NULL) {
876 		if (inaddr == NULL)
877 			vmem_free(vmp, addr, size);
878 		page_unresv(npages);
879 		return (NULL);
880 	}
881 
882 	/*
883 	 * Under certain conditions, we need to let the HAT layer know
884 	 * that it cannot safely allocate memory.  Allocations from
885 	 * the hat_memload vmem arena always need this, to prevent
886 	 * infinite recursion.
887 	 *
888 	 * In addition, the x86 hat cannot safely do memory
889 	 * allocations while in vmem_populate(), because there
890 	 * is no simple bound on its usage.
891 	 */
892 	if (vmflag & VM_MEMLOAD)
893 		allocflag = HAT_NO_KALLOC;
894 #if defined(__x86)
895 	else if (vmem_is_populator())
896 		allocflag = HAT_NO_KALLOC;
897 #endif
898 	else
899 		allocflag = 0;
900 
901 	while (ppl != NULL) {
902 		page_t *pp = ppl;
903 		page_sub(&ppl, pp);
904 		ASSERT(page_iolock_assert(pp));
905 		ASSERT(PAGE_EXCL(pp));
906 		page_io_unlock(pp);
907 		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
908 		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
909 		    HAT_LOAD_LOCK | allocflag);
910 		pp->p_lckcnt = 1;
911 #if defined(__x86)
912 		page_downgrade(pp);
913 #else
914 		if (vmflag & SEGKMEM_SHARELOCKED)
915 			page_downgrade(pp);
916 		else
917 			page_unlock(pp);
918 #endif
919 	}
920 
921 	return (addr);
922 }
923 
924 static void *
925 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
926 {
927 	void *addr;
928 	segkmem_gc_list_t *gcp, **prev_gcpp;
929 
930 	ASSERT(vp != NULL);
931 
932 	if (kvseg.s_base == NULL) {
933 #ifndef __sparc
934 		if (bootops->bsys_alloc == NULL)
935 			halt("Memory allocation between bop_alloc() and "
936 			    "kmem_alloc().\n");
937 #endif
938 
939 		/*
940 		 * There's not a lot of memory to go around during boot,
941 		 * so recycle it if we can.
942 		 */
943 		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
944 		    prev_gcpp = &gcp->gc_next) {
945 			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
946 				*prev_gcpp = gcp->gc_next;
947 				return (gcp);
948 			}
949 		}
950 
951 		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
952 		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
953 			panic("segkmem_alloc: boot_alloc failed");
954 		return (addr);
955 	}
956 	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
957 	    segkmem_page_create, vp));
958 }
959 
960 void *
961 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
962 {
963 	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
964 }
965 
966 void *
967 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
968 {
969 	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
970 }
971 
972 /*
973  * Any changes to this routine must also be carried over to
974  * devmap_free_pages() in the seg_dev driver. This is because
975  * we currently don't have a special kernel segment for non-paged
976  * kernel memory that is exported by drivers to user space.
977  */
978 static void
979 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
980     void (*func)(page_t *))
981 {
982 	page_t *pp;
983 	caddr_t addr = inaddr;
984 	caddr_t eaddr;
985 	pgcnt_t npages = btopr(size);
986 
987 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
988 	ASSERT(vp != NULL);
989 
990 	if (kvseg.s_base == NULL) {
991 		segkmem_gc_list_t *gc = inaddr;
992 		gc->gc_arena = vmp;
993 		gc->gc_size = size;
994 		gc->gc_next = segkmem_gc_list;
995 		segkmem_gc_list = gc;
996 		return;
997 	}
998 
999 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1000 
1001 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
1002 #if defined(__x86)
1003 		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
1004 		if (pp == NULL)
1005 			panic("segkmem_free: page not found");
1006 		if (!page_tryupgrade(pp)) {
1007 			/*
1008 			 * Some other thread has a sharelock. Wait for
1009 			 * it to drop the lock so we can free this page.
1010 			 */
1011 			page_unlock(pp);
1012 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
1013 			    SE_EXCL);
1014 		}
1015 #else
1016 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1017 #endif
1018 		if (pp == NULL)
1019 			panic("segkmem_free: page not found");
1020 		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1021 		pp->p_lckcnt = 0;
1022 		if (func)
1023 			func(pp);
1024 		else
1025 			page_destroy(pp, 0);
1026 	}
1027 	if (func == NULL)
1028 		page_unresv(npages);
1029 
1030 	if (vmp != NULL)
1031 		vmem_free(vmp, inaddr, size);
1032 
1033 }
1034 
1035 void
1036 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1037 {
1038 	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1039 }
1040 
1041 void
1042 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1043 {
1044 	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1045 }
1046 
1047 void
1048 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1049 {
1050 	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1051 }
1052 
1053 void
1054 segkmem_gc(void)
1055 {
1056 	ASSERT(kvseg.s_base != NULL);
1057 	while (segkmem_gc_list != NULL) {
1058 		segkmem_gc_list_t *gc = segkmem_gc_list;
1059 		segkmem_gc_list = gc->gc_next;
1060 		segkmem_free(gc->gc_arena, gc, gc->gc_size);
1061 	}
1062 }
1063 
1064 /*
1065  * Legacy entry points from here to end of file.
1066  */
1067 void
1068 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1069     pfn_t pfn, uint_t flags)
1070 {
1071 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1072 	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1073 	    flags | HAT_LOAD_LOCK);
1074 }
1075 
1076 void
1077 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1078 {
1079 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1080 }
1081 
1082 void *
1083 kmem_getpages(pgcnt_t npages, int kmflag)
1084 {
1085 	return (kmem_alloc(ptob(npages), kmflag));
1086 }
1087 
1088 void
1089 kmem_freepages(void *addr, pgcnt_t npages)
1090 {
1091 	kmem_free(addr, ptob(npages));
1092 }
1093 
1094 /*
1095  * segkmem_page_create_large() allocates a large page to be used for the kmem
1096  * caches. If kpr is enabled, we ask for a relocatable page unless requested
1097  * otherwise. If kpr is disabled, we have to ask for a non-relocatable page.
1098  */
1099 static page_t *
1100 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1101 {
1102 	int pgflags;
1103 
1104 	pgflags = PG_EXCL;
1105 
1106 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1107 		pgflags |= PG_NORELOC;
1108 	if (!(vmflag & VM_NOSLEEP))
1109 		pgflags |= PG_WAIT;
1110 	if (vmflag & VM_PUSHPAGE)
1111 		pgflags |= PG_PUSHPAGE;
1112 
1113 	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1114 	    pgflags, &kvseg, addr, arg));
1115 }
1116 
1117 /*
1118  * Allocate a large page to back the virtual address range
1119  * [addr, addr + size).  If addr is NULL, allocate the virtual address
1120  * space as well.
1121  */
1122 static void *
1123 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1124     uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1125     void *pcarg)
1126 {
1127 	caddr_t addr = inaddr, pa;
1128 	size_t  lpsize = segkmem_lpsize;
1129 	pgcnt_t npages = btopr(size);
1130 	pgcnt_t nbpages = btop(lpsize);
1131 	pgcnt_t nlpages = size >> segkmem_lpshift;
1132 	size_t  ppasize = nbpages * sizeof (page_t *);
1133 	page_t *pp, *rootpp, **ppa, *pplist = NULL;
1134 	int i;
1135 
1136 	vmflag |= VM_NOSLEEP;
1137 
1138 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1139 		return (NULL);
1140 	}
1141 
1142 	/*
1143 	 * Allocate the array we need for hat_memload_array().
1144 	 * We use a separate arena to avoid recursion.
1145 	 * We will not need this array once hat_memload_array() learns to do pp++.
1146 	 */
1147 	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1148 		goto fail_array_alloc;
1149 	}
1150 
1151 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1152 		goto fail_vmem_alloc;
1153 
1154 	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1155 
1156 	/* create all the pages */
1157 	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1158 		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1159 			goto fail_page_create;
1160 		page_list_concat(&pplist, &pp);
1161 	}
1162 
1163 	/* At this point we have all the resources to complete the request. */
1164 	while ((rootpp = pplist) != NULL) {
1165 		for (i = 0; i < nbpages; i++) {
1166 			ASSERT(pplist != NULL);
1167 			pp = pplist;
1168 			page_sub(&pplist, pp);
1169 			ASSERT(page_iolock_assert(pp));
1170 			page_io_unlock(pp);
1171 			ppa[i] = pp;
1172 		}
1173 		/*
1174 		 * Load the locked entry. It's OK to preload the entry into the
1175 		 * TSB since we now support large mappings in the kernel TSB.
1176 		 */
1177 		hat_memload_array(kas.a_hat,
1178 		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1179 		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1180 		    HAT_LOAD_LOCK);
1181 
1182 		for (--i; i >= 0; --i) {
1183 			ppa[i]->p_lckcnt = 1;
1184 			page_unlock(ppa[i]);
1185 		}
1186 	}
1187 
1188 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1189 	return (addr);
1190 
1191 fail_page_create:
1192 	while ((rootpp = pplist) != NULL) {
1193 		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1194 			ASSERT(pp != NULL);
1195 			page_sub(&pplist, pp);
1196 			ASSERT(page_iolock_assert(pp));
1197 			page_io_unlock(pp);
1198 		}
1199 		page_destroy_pages(rootpp);
1200 	}
1201 
1202 	if (inaddr == NULL)
1203 		vmem_free(vmp, addr, size);
1204 
1205 fail_vmem_alloc:
1206 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1207 
1208 fail_array_alloc:
1209 	page_unresv(npages);
1210 
1211 	return (NULL);
1212 }
1213 
1214 static void
1215 segkmem_free_one_lp(caddr_t addr, size_t size)
1216 {
1217 	page_t		*pp, *rootpp = NULL;
1218 	pgcnt_t 	pgs_left = btopr(size);
1219 
1220 	ASSERT(size == segkmem_lpsize);
1221 
1222 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1223 
1224 	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1225 		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1226 		if (pp == NULL)
1227 			panic("segkmem_free_one_lp: page not found");
1228 		ASSERT(PAGE_EXCL(pp));
1229 		pp->p_lckcnt = 0;
1230 		if (rootpp == NULL)
1231 			rootpp = pp;
1232 	}
1233 	ASSERT(rootpp != NULL);
1234 	page_destroy_pages(rootpp);
1235 
1236 	/* page_unresv() is done by the caller */
1237 }
1238 
1239 /*
1240  * This function is called to import new spans into vmem arenas such as
1241  * kmem_default_arena and kmem_oversize_arena. It first tries to import
1242  * spans from the large page arena, kmem_lp_arena. To do this it may have
1243  * to "upgrade" the requested size to the kmem_lp_arena quantum. If it
1244  * cannot satisfy the upgraded request, it falls back to regular
1245  * segkmem_alloc(), which satisfies the request by importing from "*vmp".
1246  */
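/*
 * For example, with the default segkmem_kmemlp_quantum of 4MB, a 5MB
 * import request is upgraded to asize = P2ROUNDUP(5MB, 4MB) = 8MB before
 * being tried against kmem_lp_arena.
 */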
1247 /*ARGSUSED*/
1248 void *
1249 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1250 {
1251 	size_t size;
1252 	kthread_t *t = curthread;
1253 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1254 
1255 	ASSERT(sizep != NULL);
1256 
1257 	size = *sizep;
1258 
1259 	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1260 	    !(vmflag & SEGKMEM_SHARELOCKED)) {
1261 
1262 		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1263 		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1264 		void  *addr = NULL;
1265 		ulong_t *lpthrtp = &lpcb->lp_throttle;
1266 		ulong_t lpthrt = *lpthrtp;
1267 		int	dowakeup = 0;
1268 		int	doalloc = 1;
1269 
1270 		ASSERT(kmem_lp_arena != NULL);
1271 		ASSERT(asize >= size);
1272 
1273 		if (lpthrt != 0) {
1274 			/* try to update the throttle value */
1275 			lpthrt = atomic_add_long_nv(lpthrtp, 1);
1276 			if (lpthrt >= segkmem_lpthrottle_max) {
1277 				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1278 				    segkmem_lpthrottle_max / 4);
1279 			}
1280 
1281 			/*
1282 			 * Once above the throttle start, back off exponentially:
1283 			 * retry large pages (and reap) only at powers of two.
1284 			 */
1285 			if (lpthrt > segkmem_lpthrottle_start &&
1286 			    (lpthrt & (lpthrt - 1))) {
1287 				lpcb->allocs_throttled++;
1288 				lpthrt--;
1289 				if ((lpthrt & (lpthrt - 1)) == 0)
1290 					kmem_reap();
1291 				return (segkmem_alloc(vmp, size, vmflag));
1292 			}
1293 		}
1294 
1295 		if (!(vmflag & VM_NOSLEEP) &&
1296 		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1297 		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1298 		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1299 
1300 			/*
1301 			 * We are low on free memory in kmem_lp_arena, so we
1302 			 * let only one thread allocate a heap_lp quantum-sized
1303 			 * chunk that everybody else is then going to
1304 			 * share.
1305 			 */
1306 			mutex_enter(&lpcb->lp_lock);
1307 
1308 			if (lpcb->lp_wait) {
1309 
1310 				/* We are not the first one; wait. */
1311 				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1312 				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1313 				    kmemlp_qnt)  {
1314 					doalloc = 0;
1315 				}
1316 			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1317 			    kmemlp_qnt) {
1318 
1319 				/*
1320 				 * We are the first one; make sure we import
1321 				 * a large page.
1322 				 */
1323 				if (asize == kmemlp_qnt)
1324 					asize += kmemlp_qnt;
1325 				dowakeup = 1;
1326 				lpcb->lp_wait = 1;
1327 			}
1328 
1329 			mutex_exit(&lpcb->lp_lock);
1330 		}
1331 
1332 		/*
1333 		 * The VM_ABORT flag prevents sleeps in vmem_xalloc() when
1334 		 * large pages are not available. In that case this allocation
1335 		 * attempt will fail and we will retry the allocation with small
1336 		 * pages. We also do not want to panic if this allocation fails,
1337 		 * because we are going to retry.
1338 		 */
1339 		if (doalloc) {
1340 			addr = vmem_alloc(kmem_lp_arena, asize,
1341 			    (vmflag | VM_ABORT) & ~VM_PANIC);
1342 
1343 			if (dowakeup) {
1344 				mutex_enter(&lpcb->lp_lock);
1345 				ASSERT(lpcb->lp_wait != 0);
1346 				lpcb->lp_wait = 0;
1347 				cv_broadcast(&lpcb->lp_cv);
1348 				mutex_exit(&lpcb->lp_lock);
1349 			}
1350 		}
1351 
1352 		if (addr != NULL) {
1353 			*sizep = asize;
1354 			*lpthrtp = 0;
1355 			return (addr);
1356 		}
1357 
1358 		if (vmflag & VM_NOSLEEP)
1359 			lpcb->nosleep_allocs_failed++;
1360 		else
1361 			lpcb->sleep_allocs_failed++;
1362 		lpcb->alloc_bytes_failed += size;
1363 
1364 		/* If large page throttling has not started yet, start it. */
1365 		if (segkmem_use_lpthrottle && lpthrt == 0) {
1366 			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1367 		}
1368 	}
1369 	return (segkmem_alloc(vmp, size, vmflag));
1370 }
1371 
1372 void
1373 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1374 {
1375 	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1376 		segkmem_free(vmp, inaddr, size);
1377 	} else {
1378 		vmem_free(kmem_lp_arena, inaddr, size);
1379 	}
1380 }
1381 
1382 /*
1383  * segkmem_alloc_lpi() imports virtual memory from the large page heap
1384  * arena into the kmem_lp arena. In the process it maps the imported
1385  * segment with large pages.
1386  */
1387 static void *
1388 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1389 {
1390 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1391 	void  *addr;
1392 
1393 	ASSERT(size != 0);
1394 	ASSERT(vmp == heap_lp_arena);
1395 
1396 	/* Do not allow the large page heap to grow beyond its limit. */
1397 	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1398 		lpcb->allocs_limited++;
1399 		return (NULL);
1400 	}
1401 
1402 	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1403 	    segkmem_page_create_large, NULL);
1404 	return (addr);
1405 }
1406 
1407 /*
1408  * segkmem_free_lpi() returns virtual memory from the kmem_lp arena back
1409  * into the large page heap arena. Before doing this it unmaps the segment
1410  * and frees the large pages used to map it.
1411  */
1412 static void
1413 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1414 {
1415 	pgcnt_t		nlpages = size >> segkmem_lpshift;
1416 	size_t		lpsize = segkmem_lpsize;
1417 	caddr_t		addr = inaddr;
1418 	pgcnt_t 	npages = btopr(size);
1419 	int		i;
1420 
1421 	ASSERT(vmp == heap_lp_arena);
1422 	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1423 	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1424 
1425 	for (i = 0; i < nlpages; i++) {
1426 		segkmem_free_one_lp(addr, lpsize);
1427 		addr += lpsize;
1428 	}
1429 
1430 	page_unresv(npages);
1431 
1432 	vmem_free(vmp, inaddr, size);
1433 }
1434 
1435 /*
1436  * This function is called at system boot time by kmem_init(), right after
1437  * the /etc/system file has been read. Based on the hardware configuration
1438  * and /etc/system settings, it decides whether the system is going to use
1439  * large pages. The initialization necessary to actually start using large
1440  * pages happens later in the boot process, after segkmem_heap_lp_init().
1441  */
1442 int
1443 segkmem_lpsetup()
1444 {
1445 	int use_large_pages = 0;
1446 
1447 #ifdef __sparc
1448 
1449 	size_t memtotal = physmem * PAGESIZE;
1450 
1451 	if (heap_lp_base == NULL) {
1452 		segkmem_lpsize = PAGESIZE;
1453 		return (0);
1454 	}
1455 
1456 	/* Get the platform-dependent large page size for the kernel heap. */
1457 	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1458 
1459 	if (segkmem_lpsize <= PAGESIZE) {
1460 		/*
1461 		 * Return the virtual space reserved for the large page
1462 		 * kernel heap to the regular heap.
1463 		 */
1464 		vmem_xfree(heap_arena, heap_lp_base,
1465 		    heap_lp_end - heap_lp_base);
1466 		heap_lp_base = NULL;
1467 		heap_lp_end = NULL;
1468 		segkmem_lpsize = PAGESIZE;
1469 		return (0);
1470 	}
1471 
1472 	/* set heap_lp quantum if necessary */
1473 	if (segkmem_heaplp_quantum == 0 ||
1474 	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
1475 	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1476 		segkmem_heaplp_quantum = segkmem_lpsize;
1477 	}
1478 
1479 	/* set kmem_lp quantum if necessary */
1480 	if (segkmem_kmemlp_quantum == 0 ||
1481 	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
1482 	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1483 		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1484 	}
1485 
1486 	/* set total amount of memory allowed for large page kernel heap */
1487 	if (segkmem_kmemlp_max == 0) {
1488 		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1489 			segkmem_kmemlp_pcnt = 12;
1490 		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1491 	}
1492 	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1493 	    segkmem_heaplp_quantum);
1494 
1495 	/* Fix the lp kmem preallocation request if necessary. */
1496 	if (segkmem_kmemlp_min) {
1497 		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1498 		    segkmem_heaplp_quantum);
1499 		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1500 			segkmem_kmemlp_min = segkmem_kmemlp_max;
1501 	}
1502 
1503 	use_large_pages = 1;
1504 	segkmem_lpszc = page_szc(segkmem_lpsize);
1505 	segkmem_lpshift = page_get_shift(segkmem_lpszc);
1506 
1507 #endif
1508 	return (use_large_pages);
1509 }
1510 
1511 void
1512 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1513 {
1514 	ASSERT(zio_mem_base != NULL);
1515 	ASSERT(zio_mem_size != 0);
1516 
1517 	zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
1518 	    PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
1519 
1520 	zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
1521 	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1522 
1523 	ASSERT(zio_arena != NULL);
1524 	ASSERT(zio_alloc_arena != NULL);
1525 }
1526 
1527 #ifdef __sparc
1528 
1529 
1530 static void *
1531 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1532 {
1533 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1534 	void   *addr;
1535 
1536 	if (ppaquantum <= PAGESIZE)
1537 		return (segkmem_alloc(vmp, size, vmflag));
1538 
1539 	ASSERT((size & (ppaquantum - 1)) == 0);
1540 
1541 	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1542 	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1543 	    segkmem_page_create, NULL) == NULL) {
1544 		vmem_xfree(vmp, addr, size);
1545 		addr = NULL;
1546 	}
1547 
1548 	return (addr);
1549 }
1550 
1551 static void
1552 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1553 {
1554 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1555 
1556 	ASSERT(addr != NULL);
1557 
1558 	if (ppaquantum <= PAGESIZE) {
1559 		segkmem_free(vmp, addr, size);
1560 	} else {
1561 		segkmem_free(NULL, addr, size);
1562 		vmem_xfree(vmp, addr, size);
1563 	}
1564 }
1565 
1566 void
1567 segkmem_heap_lp_init()
1568 {
1569 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1570 	size_t heap_lp_size = heap_lp_end - heap_lp_base;
1571 	size_t lpsize = segkmem_lpsize;
1572 	size_t ppaquantum;
1573 	void   *addr;
1574 
1575 	if (segkmem_lpsize <= PAGESIZE) {
1576 		ASSERT(heap_lp_base == NULL);
1577 		ASSERT(heap_lp_end == NULL);
1578 		return;
1579 	}
1580 
1581 	ASSERT(segkmem_heaplp_quantum >= lpsize);
1582 	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1583 	ASSERT(lpcb->lp_uselp == 0);
1584 	ASSERT(heap_lp_base != NULL);
1585 	ASSERT(heap_lp_end != NULL);
1586 	ASSERT(heap_lp_base < heap_lp_end);
1587 	ASSERT(heap_lp_arena == NULL);
1588 	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1589 	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1590 
1591 	/* create large page heap arena */
1592 	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1593 	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1594 
1595 	ASSERT(heap_lp_arena != NULL);
1596 
1597 	/* This arena caches memory already mapped by large pages */
1598 	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1599 	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1600 
1601 	ASSERT(kmem_lp_arena != NULL);
1602 
1603 	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1604 	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1605 
1606 	/*
1607 	 * This arena is used for the arrays of page_t pointers necessary
1608 	 * to call hat_memload_array().
1609 	 */
1610 	ppaquantum = btopr(lpsize) * sizeof (page_t *);
1611 	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1612 	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1613 	    VM_SLEEP);
1614 
1615 	ASSERT(segkmem_ppa_arena != NULL);
1616 
1617 	/* Preallocate some memory for the large page kernel heap. */
1618 	if (segkmem_kmemlp_min) {
1619 
1620 		ASSERT(P2PHASE(segkmem_kmemlp_min,
1621 		    segkmem_heaplp_quantum) == 0);
1622 
1623 		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1624 		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1625 
1626 			addr = vmem_add(kmem_lp_arena, addr,
1627 			    segkmem_kmemlp_min, VM_SLEEP);
1628 			ASSERT(addr != NULL);
1629 		}
1630 	}
1631 
1632 	lpcb->lp_uselp = 1;
1633 }
1634 
1635 #endif
1636