xref: /illumos-gate/usr/src/uts/i86pc/io/amd_iommu/amd_iommu_acpi.c (revision 856f710c9dc323b39da5935194d7928ffb99b67f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 #include "amd_iommu_acpi.h"
27 #include "amd_iommu_impl.h"
28 
29 static int create_acpi_hash(amd_iommu_acpi_t *acpi);
30 static void amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp);
31 
32 static void dump_acpi_aliases(void);
33 
34 
35 /*
36  * Globals
37  */
38 static amd_iommu_acpi_global_t *amd_iommu_acpi_global;
39 static amd_iommu_acpi_ivhd_t **amd_iommu_acpi_ivhd_hash;
40 static amd_iommu_acpi_ivmd_t **amd_iommu_acpi_ivmd_hash;
41 
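/*
 * Each device entry in an IVHD block begins with a type byte whose
 * length field (AMD_IOMMU_ACPI_DEVENTRY_LEN) encodes the size of the
 * entry: 0 = 4 bytes, 1 = 8 bytes, 2 = 16 bytes, 3 = 32 bytes.
 * type_byte_size() decodes that field so the IVHD parser can step
 * from one device entry to the next.
 */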
42 static int
43 type_byte_size(char *cp)
44 {
45 	uint8_t type8 = *((uint8_t *)cp);
46 	uint8_t len_bits;
47 
48 	len_bits = AMD_IOMMU_REG_GET8(&type8, AMD_IOMMU_ACPI_DEVENTRY_LEN);
49 
50 	switch (len_bits) {
51 	case 0:
52 		return (4);
53 	case 1:
54 		return (8);
55 	case 2:
56 		return (16);
57 	case 3:
58 		return (32);
59 	default:
60 		cmn_err(CE_WARN, "%s: Invalid deventry len: %d",
61 		    amd_iommu_modname, len_bits);
62 		return (len_bits);
63 	}
64 	/*NOTREACHED*/
65 }
66 
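/*
 * Parse a 4-byte IVHD device entry. The layout handled below is:
 *
 *	byte 0		entry type (0 reserved, 1 all, 2 select,
 *			3 start-of-range, 4 end-of-range)
 *	bytes 1-2	deviceid (16-bit BDF, little-endian)
 *	byte 3		DTE settings (LINT/NMI/EXTINT/INIT pass, SysMgt)
 *
 * For example (illustrative bytes, not taken from a real table), the
 * entry 0x02 0x08 0x00 0x00 would be parsed as a DEVENTRY_SELECT for
 * deviceid 0x0008 (bus 0, device 1, function 0) with all data
 * settings clear.
 */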
67 static void
68 process_4byte_deventry(ivhd_container_t *c, char *cp)
69 {
70 	int entry_type = *((uint8_t *)cp);
71 	ivhd_deventry_t deventry = {0};
72 	ivhd_deventry_t *devp;
73 	uint8_t datsetting8;
74 	align_16_t al = {0};
75 	int i;
76 
77 	/* 4 byte entry */
78 	deventry.idev_len = 4;
79 	deventry.idev_deviceid = -1;
80 	deventry.idev_src_deviceid = -1;
81 
82 	for (i = 0; i < 2; i++) {
83 		al.ent8[i] = *((uint8_t *)&cp[i + 1]);
84 	}
85 
86 	switch (entry_type) {
87 	case 1:
88 		deventry.idev_type = DEVENTRY_ALL;
89 		break;
90 	case 2:
91 		deventry.idev_type = DEVENTRY_SELECT;
92 		deventry.idev_deviceid = al.ent16;
93 		break;
94 	case 3:
95 		deventry.idev_type = DEVENTRY_RANGE;
96 		deventry.idev_deviceid = al.ent16;
97 		break;
98 	case 4:
99 		deventry.idev_type = DEVENTRY_RANGE_END;
100 		deventry.idev_deviceid = al.ent16;
101 		ASSERT(cp[3] == 0);
102 		break;
103 	case 0:
104 		ASSERT(al.ent16 == 0);
105 		ASSERT(cp[3] == 0);
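		/*FALLTHROUGH*/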
106 	default:
107 		return;
108 	}
109 
110 
111 	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
112 	*devp = deventry;
113 
114 	if (c->ivhdc_first_deventry == NULL)
115 		c->ivhdc_first_deventry = devp;
116 	else
117 		c->ivhdc_last_deventry->idev_next = devp;
118 
119 	c->ivhdc_last_deventry = devp;
120 
121 	if (entry_type == 4)
122 		return;
123 
124 	datsetting8 = (*((uint8_t *)&cp[3]));
125 
126 	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
127 	    AMD_IOMMU_ACPI_LINT1PASS);
128 
129 	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
130 	    AMD_IOMMU_ACPI_LINT0PASS);
131 
132 	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
133 	    AMD_IOMMU_ACPI_SYSMGT);
134 
135 	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
136 	    AMD_IOMMU_ACPI_DATRSV) == 0);
137 
138 	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
139 	    AMD_IOMMU_ACPI_NMIPASS);
140 
141 	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
142 	    AMD_IOMMU_ACPI_EXTINTPASS);
143 
144 	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
145 	    AMD_IOMMU_ACPI_INITPASS);
146 }
147 
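/*
 * Parse an 8-byte IVHD device entry. The entry types handled below are
 * 66 (alias select), 67 (alias start-of-range), 70 (extended select),
 * 71 (extended start-of-range) and 72 (special device). Bytes 1-2 hold
 * the deviceid, byte 3 the DTE settings, and bytes 4-7 carry the alias
 * source deviceid, the extended (ATS) data or the special device
 * handle/variety, depending on the entry type.
 */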
148 static void
149 process_8byte_deventry(ivhd_container_t *c, char *cp)
150 {
151 	uint8_t datsetting8;
152 	int entry_type = (uint8_t)*cp;
153 	ivhd_deventry_t deventry = {0};
154 	ivhd_deventry_t *devp;
155 	align_16_t al1 = {0};
156 	align_16_t al2 = {0};
157 	align_32_t al3 = {0};
158 	int i;
159 
160 	/* Length is 8 bytes */
161 	deventry.idev_len = 8;
162 	deventry.idev_deviceid = -1;
163 	deventry.idev_src_deviceid = -1;
164 
165 	for (i = 0; i < 2; i++) {
166 		al1.ent8[i] = *((uint8_t *)&cp[i+1]);
167 		al2.ent8[i] = *((uint8_t *)&cp[i+5]);
168 	}
169 
170 	datsetting8 = *((uint8_t *)&cp[3]);
171 
172 	switch (entry_type) {
173 	case 66:
174 		deventry.idev_type = DEVENTRY_ALIAS_SELECT;
175 		deventry.idev_deviceid = al1.ent16;
176 		deventry.idev_src_deviceid = al2.ent16;
177 		ASSERT(cp[4] == 0);
178 		ASSERT(cp[7] == 0);
179 		break;
180 	case 67:
181 		deventry.idev_type = DEVENTRY_ALIAS_RANGE;
182 		deventry.idev_deviceid = al1.ent16;
183 		deventry.idev_src_deviceid = al2.ent16;
184 		ASSERT(cp[4] == 0);
185 		ASSERT(cp[7] == 0);
186 		break;
187 	case 70:
188 		deventry.idev_type = DEVENTRY_EXTENDED_SELECT;
189 		deventry.idev_deviceid = al1.ent16;
190 		break;
191 	case 71:
192 		deventry.idev_type = DEVENTRY_EXTENDED_RANGE;
193 		deventry.idev_deviceid = al1.ent16;
194 		break;
195 	case 72:
196 		deventry.idev_type = DEVENTRY_SPECIAL_DEVICE;
197 		ASSERT(al1.ent16 == 0);
198 		deventry.idev_deviceid = -1;
199 		deventry.idev_handle = cp[4];
200 		deventry.idev_variety = cp[7];
201 		deventry.idev_src_deviceid = al2.ent16;
		break;
202 	default:
203 #ifdef BROKEN_ASSERT
204 		for (i = 0; i < 7; i++) {
205 			ASSERT(cp[i] == 0);
206 		}
207 #endif
208 		return;
209 	}
210 
211 
212 	devp = kmem_alloc(sizeof (ivhd_deventry_t), KM_SLEEP);
213 	*devp = deventry;
214 
215 	if (c->ivhdc_first_deventry == NULL)
216 		c->ivhdc_first_deventry = devp;
217 	else
218 		c->ivhdc_last_deventry->idev_next = devp;
219 
220 	c->ivhdc_last_deventry = devp;
221 
222 	devp->idev_Lint1Pass = AMD_IOMMU_REG_GET8(&datsetting8,
223 	    AMD_IOMMU_ACPI_LINT1PASS);
224 
225 	devp->idev_Lint0Pass = AMD_IOMMU_REG_GET8(&datsetting8,
226 	    AMD_IOMMU_ACPI_LINT0PASS);
227 
228 	devp->idev_SysMgt = AMD_IOMMU_REG_GET8(&datsetting8,
229 	    AMD_IOMMU_ACPI_SYSMGT);
230 
231 	ASSERT(AMD_IOMMU_REG_GET8(&datsetting8,
232 	    AMD_IOMMU_ACPI_DATRSV) == 0);
233 
234 	devp->idev_NMIPass = AMD_IOMMU_REG_GET8(&datsetting8,
235 	    AMD_IOMMU_ACPI_NMIPASS);
236 
237 	devp->idev_ExtIntPass = AMD_IOMMU_REG_GET8(&datsetting8,
238 	    AMD_IOMMU_ACPI_EXTINTPASS);
239 
240 	devp->idev_INITPass = AMD_IOMMU_REG_GET8(&datsetting8,
241 	    AMD_IOMMU_ACPI_INITPASS);
242 
243 	if (entry_type != 70 && entry_type != 71) {
244 		return;
245 	}
246 
247 	/* Type 70 and 71 */
248 	for (i = 0; i < 4; i++) {
249 		al3.ent8[i] = *((uint8_t *)&cp[i+4]);
250 	}
251 
252 	devp->idev_AtsDisabled = AMD_IOMMU_REG_GET8(&al3.ent32,
253 	    AMD_IOMMU_ACPI_ATSDISABLED);
254 
255 	ASSERT(AMD_IOMMU_REG_GET8(&al3.ent32, AMD_IOMMU_ACPI_EXTDATRSV) == 0);
256 }
257 
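/*
 * Copy a type 0x10 IVHD block into a new container on the ACPI state's
 * list and walk the variable-length device entries that follow the
 * fixed IVHD header, dispatching on each entry's encoded size.
 */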
258 static void
259 process_ivhd(amd_iommu_acpi_t *acpi, ivhd_t *ivhdp)
260 {
261 	ivhd_container_t *c;
262 	caddr_t ivhd_end;
263 	caddr_t ivhd_tot_end;
264 	caddr_t cp;
265 
266 	ASSERT(ivhdp->ivhd_type == 0x10);
267 
268 	c = kmem_zalloc(sizeof (ivhd_container_t), KM_SLEEP);
269 	c->ivhdc_ivhd = kmem_alloc(sizeof (ivhd_t), KM_SLEEP);
270 	*(c->ivhdc_ivhd) = *ivhdp;
271 
272 	if (acpi->acp_first_ivhdc == NULL)
273 		acpi->acp_first_ivhdc = c;
274 	else
275 		acpi->acp_last_ivhdc->ivhdc_next = c;
276 
277 	acpi->acp_last_ivhdc = c;
278 
279 	ivhd_end = (caddr_t)ivhdp + sizeof (ivhd_t);
280 	ivhd_tot_end = (caddr_t)ivhdp + ivhdp->ivhd_len;
281 
282 	for (cp = ivhd_end; cp < ivhd_tot_end; cp += type_byte_size(cp)) {
283 		/* 16 byte and 32 byte size are currently reserved */
284 		switch (type_byte_size(cp)) {
285 		case 4:
286 			process_4byte_deventry(c, cp);
287 			break;
288 		case 8:
289 			process_8byte_deventry(c, cp);
290 			break;
291 		case 16:
292 		case 32:
293 			/* Reserved */
294 			break;
295 		default:
296 			cmn_err(CE_WARN, "%s: unsupported length for device "
297 			    "entry in ACPI IVRS table's IVHD entry",
298 			    amd_iommu_modname);
299 			break;
300 		}
301 	}
302 }
303 
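/*
 * Copy an IVMD (memory definition) block into a new container on the
 * ACPI state's list. IVMD blocks have no sub-entries to parse.
 */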
304 static void
305 process_ivmd(amd_iommu_acpi_t *acpi, ivmd_t *ivmdp)
306 {
307 	ivmd_container_t *c;
308 
309 	ASSERT(ivmdp->ivmd_type != 0x10);
310 
311 	c = kmem_zalloc(sizeof (ivmd_container_t), KM_SLEEP);
312 	c->ivmdc_ivmd = kmem_alloc(sizeof (ivmd_t), KM_SLEEP);
313 	*(c->ivmdc_ivmd) = *ivmdp;
314 
315 	if (acpi->acp_first_ivmdc == NULL)
316 		acpi->acp_first_ivmdc = c;
317 	else
318 		acpi->acp_last_ivmdc->ivmdc_next = c;
319 
320 	acpi->acp_last_ivmdc = c;
321 }
322 
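/*
 * Locate the IVRS table via ACPICA, copy its IVHD (type 0x10) and IVMD
 * (types 0x20-0x22) blocks into local containers, build the global info
 * and the IVHD/IVMD hashes from them, and then free the containers.
 * Only the hashes are kept for later lookups.
 */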
323 int
324 amd_iommu_acpi_init(void)
325 {
326 	ivrs_t *ivrsp;
327 	caddr_t ivrsp_end;
328 	caddr_t table_end;
329 	caddr_t cp;
330 	uint8_t type8;
331 	amd_iommu_acpi_t *acpi;
332 	align_ivhd_t al_vhd = {0};
333 	align_ivmd_t al_vmd = {0};
334 
335 	if (AcpiGetTable(IVRS_SIG, 1, (ACPI_TABLE_HEADER **)&ivrsp) != AE_OK) {
336 		cmn_err(CE_NOTE, "!amd_iommu: No AMD IOMMU ACPI IVRS table");
337 		return (DDI_FAILURE);
338 	}
339 
340 	/*
341 	 * Reserved field must be 0
342 	 */
343 	ASSERT(ivrsp->ivrs_resv == 0);
344 
345 	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
346 	    AMD_IOMMU_ACPI_IVINFO_RSV1) == 0);
347 	ASSERT(AMD_IOMMU_REG_GET32(&ivrsp->ivrs_ivinfo,
348 	    AMD_IOMMU_ACPI_IVINFO_RSV2) == 0);
349 
350 	ivrsp_end = (caddr_t)ivrsp + sizeof (struct ivrs);
351 	table_end = (caddr_t)ivrsp + ivrsp->ivrs_hdr.Length;
352 
353 	acpi = kmem_zalloc(sizeof (amd_iommu_acpi_t), KM_SLEEP);
354 	acpi->acp_ivrs = kmem_alloc(sizeof (ivrs_t), KM_SLEEP);
355 	*(acpi->acp_ivrs) = *ivrsp;
356 
357 	for (cp = ivrsp_end; cp < table_end; cp += (al_vhd.ivhdp)->ivhd_len) {
358 		al_vhd.cp = cp;
359 		if (al_vhd.ivhdp->ivhd_type == 0x10)
360 			process_ivhd(acpi, al_vhd.ivhdp);
361 	}
362 
363 	for (cp = ivrsp_end; cp < table_end; cp += (al_vmd.ivmdp)->ivmd_len) {
364 		al_vmd.cp = cp;
365 		type8 = al_vmd.ivmdp->ivmd_type;
366 		if (type8 == 0x20 || type8 == 0x21 || type8 == 0x22)
367 			process_ivmd(acpi, al_vmd.ivmdp);
368 	}
369 
370 	if (create_acpi_hash(acpi) != DDI_SUCCESS) {
371 		return (DDI_FAILURE);
372 	}
373 
374 	amd_iommu_acpi_table_fini(&acpi);
375 
376 	ASSERT(acpi == NULL);
377 
378 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
379 		dump_acpi_aliases();
380 		debug_enter("dump");
381 	}
382 
383 	return (DDI_SUCCESS);
384 }
385 
386 static ivhd_deventry_t *
387 free_ivhd_deventry(ivhd_deventry_t *devp)
388 {
389 	ivhd_deventry_t *next = devp->idev_next;
390 
391 	kmem_free(devp, sizeof (ivhd_deventry_t));
392 
393 	return (next);
394 }
395 
396 static ivhd_container_t *
397 free_ivhd_container(ivhd_container_t *ivhdcp)
398 {
399 	ivhd_container_t *next = ivhdcp->ivhdc_next;
400 	ivhd_deventry_t *devp;
401 
402 	for (devp = ivhdcp->ivhdc_first_deventry; devp; ) {
403 		devp = free_ivhd_deventry(devp);
404 	}
405 
406 	kmem_free(ivhdcp->ivhdc_ivhd, sizeof (ivhd_t));
407 	kmem_free(ivhdcp, sizeof (ivhd_container_t));
408 
409 	return (next);
410 }
411 
412 static ivmd_container_t *
413 free_ivmd_container(ivmd_container_t *ivmdcp)
414 {
415 	ivmd_container_t *next = ivmdcp->ivmdc_next;
416 
417 	kmem_free(ivmdcp->ivmdc_ivmd, sizeof (ivmd_t));
418 	kmem_free(ivmdcp, sizeof (ivmd_container_t));
419 
420 	return (next);
421 }
422 
423 void
424 amd_iommu_acpi_fini(void)
425 {
426 }
427 
428 /*
429  * TODO: Do we need to free the ACPI table returned by AcpiGetTable()?
430  */
431 static void
432 amd_iommu_acpi_table_fini(amd_iommu_acpi_t **acpipp)
433 {
434 	amd_iommu_acpi_t *acpi = *acpipp;
435 	ivhd_container_t *ivhdcp;
436 	ivmd_container_t *ivmdcp;
437 
438 	ASSERT(acpi);
439 
440 	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp; ) {
441 		ivhdcp = free_ivhd_container(ivhdcp);
442 	}
443 	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp; ) {
444 		ivmdcp = free_ivmd_container(ivmdcp);
445 	}
446 
447 	kmem_free(acpi->acp_ivrs, sizeof (struct ivrs));
448 	kmem_free(acpi, sizeof (amd_iommu_acpi_t));
449 
450 	*acpipp = NULL;
451 }
452 
453 static uint16_t
454 deviceid_hashfn(uint16_t deviceid)
455 {
456 	return (deviceid % AMD_IOMMU_ACPI_INFO_HASH_SZ);
457 }
458 
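/*
 * Convert one parsed device entry into an amd_iommu_acpi_ivhd_t and
 * insert it into the IVHD hash. Entries naming a single deviceid are
 * hashed by deviceid; range, "all" and special-device entries all go
 * into the extra bucket at index AMD_IOMMU_ACPI_INFO_HASH_SZ. A
 * start-of-range entry is remembered in `last' until the matching
 * range-end entry arrives and fills in ach_deviceid_end.
 */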
459 static void
460 add_deventry_info(ivhd_t *ivhdp, ivhd_deventry_t *deventry,
461     amd_iommu_acpi_ivhd_t **hash)
462 {
463 	static amd_iommu_acpi_ivhd_t *last;
464 	amd_iommu_acpi_ivhd_t *acpi_ivhdp;
465 	uint8_t uint8_flags;
466 	uint16_t uint16_info;
467 	uint16_t idx;
468 
469 	if (deventry->idev_type == DEVENTRY_RANGE_END) {
470 		ASSERT(last);
471 		acpi_ivhdp = last;
472 		last = NULL;
473 		ASSERT(acpi_ivhdp->ach_dev_type == DEVENTRY_RANGE ||
474 		    acpi_ivhdp->ach_dev_type == DEVENTRY_ALIAS_RANGE ||
475 		    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE);
476 		ASSERT(acpi_ivhdp->ach_deviceid_end == -1);
477 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
478 		/* TODO ASSERT data is 0 */
479 		return;
480 	}
481 
482 	ASSERT(last == NULL);
483 	acpi_ivhdp = kmem_zalloc(sizeof (*acpi_ivhdp), KM_SLEEP);
484 
485 	uint8_flags = ivhdp->ivhd_flags;
486 
487 #ifdef BROKEN_ASSERT
488 	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
489 	    AMD_IOMMU_ACPI_IVHD_FLAGS_RSV) == 0);
490 #endif
491 
492 	acpi_ivhdp->ach_IotlbSup = AMD_IOMMU_REG_GET8(&uint8_flags,
493 	    AMD_IOMMU_ACPI_IVHD_FLAGS_IOTLBSUP);
494 	acpi_ivhdp->ach_Isoc = AMD_IOMMU_REG_GET8(&uint8_flags,
495 	    AMD_IOMMU_ACPI_IVHD_FLAGS_ISOC);
496 	acpi_ivhdp->ach_ResPassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
497 	    AMD_IOMMU_ACPI_IVHD_FLAGS_RESPASSPW);
498 	acpi_ivhdp->ach_PassPW = AMD_IOMMU_REG_GET8(&uint8_flags,
499 	    AMD_IOMMU_ACPI_IVHD_FLAGS_PASSPW);
500 	acpi_ivhdp->ach_HtTunEn = AMD_IOMMU_REG_GET8(&uint8_flags,
501 	    AMD_IOMMU_ACPI_IVHD_FLAGS_HTTUNEN);
502 
503 	/* IVHD fields */
504 	acpi_ivhdp->ach_IOMMU_deviceid = ivhdp->ivhd_deviceid;
505 	acpi_ivhdp->ach_IOMMU_cap_off = ivhdp->ivhd_cap_off;
506 	acpi_ivhdp->ach_IOMMU_reg_base = ivhdp->ivhd_reg_base;
507 	acpi_ivhdp->ach_IOMMU_pci_seg = ivhdp->ivhd_pci_seg;
508 
509 	/* IVHD IOMMU info fields */
510 	uint16_info = ivhdp->ivhd_iommu_info;
511 
512 #ifdef BROKEN_ASSERT
513 	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
514 	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV1) == 0);
515 #endif
516 
517 	acpi_ivhdp->ach_IOMMU_UnitID = AMD_IOMMU_REG_GET16(&uint16_info,
518 	    AMD_IOMMU_ACPI_IOMMU_INFO_UNITID);
519 	ASSERT(AMD_IOMMU_REG_GET16(&uint16_info,
520 	    AMD_IOMMU_ACPI_IOMMU_INFO_RSV2) == 0);
521 	acpi_ivhdp->ach_IOMMU_MSInum = AMD_IOMMU_REG_GET16(&uint16_info,
522 	    AMD_IOMMU_ACPI_IOMMU_INFO_MSINUM);
523 
524 	/* Initialize deviceids to -1 */
525 	acpi_ivhdp->ach_deviceid_start = -1;
526 	acpi_ivhdp->ach_deviceid_end = -1;
527 	acpi_ivhdp->ach_src_deviceid = -1;
528 
529 	/* All range type entries are put on hash entry 0 */
530 	switch (deventry->idev_type) {
531 	case DEVENTRY_ALL:
532 		acpi_ivhdp->ach_deviceid_start = 0;
533 		acpi_ivhdp->ach_deviceid_end = (uint16_t)-1;
534 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALL;
535 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
536 		break;
537 	case DEVENTRY_SELECT:
538 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
539 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
540 		acpi_ivhdp->ach_dev_type = DEVENTRY_SELECT;
541 		idx = deviceid_hashfn(deventry->idev_deviceid);
542 		break;
543 	case DEVENTRY_RANGE:
544 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
545 		acpi_ivhdp->ach_deviceid_end = -1;
546 		acpi_ivhdp->ach_dev_type = DEVENTRY_RANGE;
547 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
548 		last = acpi_ivhdp;
549 		break;
550 	case DEVENTRY_RANGE_END:
551 		cmn_err(CE_PANIC, "%s: Unexpected Range End Deventry",
552 		    amd_iommu_modname);
553 		/*NOTREACHED*/
554 		break;
555 	case DEVENTRY_ALIAS_SELECT:
556 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
557 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
558 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
559 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_SELECT;
560 		idx = deviceid_hashfn(deventry->idev_deviceid);
561 		break;
562 	case DEVENTRY_ALIAS_RANGE:
563 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
564 		acpi_ivhdp->ach_deviceid_end = -1;
565 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
566 		acpi_ivhdp->ach_dev_type = DEVENTRY_ALIAS_RANGE;
567 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
568 		last = acpi_ivhdp;
569 		break;
570 	case DEVENTRY_EXTENDED_SELECT:
571 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
572 		acpi_ivhdp->ach_deviceid_end = deventry->idev_deviceid;
573 		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_SELECT;
574 		idx = deviceid_hashfn(deventry->idev_deviceid);
575 		break;
576 	case DEVENTRY_EXTENDED_RANGE:
577 		acpi_ivhdp->ach_deviceid_start = deventry->idev_deviceid;
578 		acpi_ivhdp->ach_deviceid_end = -1;
579 		acpi_ivhdp->ach_dev_type = DEVENTRY_EXTENDED_RANGE;
580 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
581 		last = acpi_ivhdp;
582 		break;
583 	case DEVENTRY_SPECIAL_DEVICE:
584 		acpi_ivhdp->ach_deviceid_start = -1;
585 		acpi_ivhdp->ach_deviceid_end = -1;
586 		acpi_ivhdp->ach_src_deviceid = deventry->idev_src_deviceid;
587 		acpi_ivhdp->ach_special_handle = deventry->idev_handle;
588 		acpi_ivhdp->ach_special_variety = deventry->idev_variety;
		acpi_ivhdp->ach_dev_type = DEVENTRY_SPECIAL_DEVICE;
589 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
590 		break;
591 	default:
592 		cmn_err(CE_PANIC, "%s: Unsupported deventry type",
593 		    amd_iommu_modname);
594 		/*NOTREACHED*/
595 	}
596 
597 	acpi_ivhdp->ach_Lint1Pass = deventry->idev_Lint1Pass;
598 	acpi_ivhdp->ach_Lint0Pass = deventry->idev_Lint0Pass;
599 	acpi_ivhdp->ach_SysMgt = deventry->idev_SysMgt;
600 	acpi_ivhdp->ach_NMIPass = deventry->idev_NMIPass;
601 	acpi_ivhdp->ach_ExtIntPass = deventry->idev_ExtIntPass;
602 	acpi_ivhdp->ach_INITPass = deventry->idev_INITPass;
603 
604 
605 	/* extended data */
606 	if (acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_SELECT ||
607 	    acpi_ivhdp->ach_dev_type == DEVENTRY_EXTENDED_RANGE) {
608 		acpi_ivhdp->ach_AtsDisabled = deventry->idev_AtsDisabled;
609 	}
610 
611 	/*
612 	 * Now add it to the hash
613 	 */
614 	ASSERT(hash[idx] != acpi_ivhdp);
615 	acpi_ivhdp->ach_next = hash[idx];
616 	hash[idx] = acpi_ivhdp;
617 }
618 
619 /*
620  * A device entry may be declared implicitly as a source device ID
621  * in an alias entry. This routine adds it to the hash
622  */
623 static void
624 add_implicit_deventry(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
625 {
626 	ivhd_deventry_t *d;
627 	int deviceid;
628 
629 	for (d = ivhdcp->ivhdc_first_deventry; d; d = d->idev_next) {
630 
631 		if ((d->idev_type != DEVENTRY_ALIAS_SELECT) &&
632 		    (d->idev_type != DEVENTRY_ALIAS_RANGE))
633 			continue;
634 
635 		deviceid = d->idev_src_deviceid;
636 
637 		if (amd_iommu_lookup_ivhd(deviceid) == NULL) {
638 			ivhd_deventry_t deventry;
639 
640 			/* Fake a SELECT entry */
641 			deventry.idev_type = DEVENTRY_SELECT;
642 			deventry.idev_len = 4;
643 			deventry.idev_deviceid = deviceid;
644 			deventry.idev_src_deviceid = -1;
645 
646 			deventry.idev_Lint1Pass = d->idev_Lint1Pass;
647 			deventry.idev_Lint0Pass = d->idev_Lint0Pass;
648 			deventry.idev_SysMgt = d->idev_SysMgt;
649 			deventry.idev_NMIPass = d->idev_NMIPass;
650 			deventry.idev_ExtIntPass = d->idev_ExtIntPass;
651 			deventry.idev_INITPass = d->idev_INITPass;
652 
653 			add_deventry_info(ivhdcp->ivhdc_ivhd, &deventry, hash);
654 
655 			if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
656 				cmn_err(CE_NOTE, "Added implicit IVHD entry "
657 				    "for: deviceid = %u", deviceid);
658 			}
659 		}
660 	}
661 }
662 
663 static void
664 add_ivhdc_info(ivhd_container_t *ivhdcp, amd_iommu_acpi_ivhd_t **hash)
665 {
666 	ivhd_deventry_t *deventry;
667 	ivhd_t *ivhdp = ivhdcp->ivhdc_ivhd;
668 
669 	for (deventry = ivhdcp->ivhdc_first_deventry; deventry;
670 	    deventry = deventry->idev_next) {
671 		add_deventry_info(ivhdp, deventry, hash);
672 	}
673 
674 	add_implicit_deventry(ivhdcp, hash);
675 
676 }
677 
678 static void
679 add_ivhd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivhd_t **hash)
680 {
681 	ivhd_container_t *ivhdcp;
682 
683 	for (ivhdcp = acpi->acp_first_ivhdc; ivhdcp;
684 	    ivhdcp = ivhdcp->ivhdc_next) {
685 		add_ivhdc_info(ivhdcp, hash);
686 	}
687 }
688 
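/*
 * Convert one IVMD block into an amd_iommu_acpi_ivmd_t and insert it
 * into the IVMD hash: type 0x20 covers all deviceids, 0x21 a single
 * deviceid and 0x22 a deviceid range (with the range end held in
 * auxdata). The exclusion-range/unity and read/write flags plus the
 * physical window are copied from the table.
 */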
689 static void
690 set_ivmd_info(ivmd_t *ivmdp, amd_iommu_acpi_ivmd_t **hash)
691 {
692 	amd_iommu_acpi_ivmd_t *acpi_ivmdp;
693 	uint8_t uint8_flags;
694 	uint16_t idx;
695 
696 	uint8_flags = ivmdp->ivmd_flags;
697 
698 	acpi_ivmdp = kmem_zalloc(sizeof (*acpi_ivmdp), KM_SLEEP);
699 
700 	switch (ivmdp->ivmd_type) {
701 	case 0x20:
702 		acpi_ivmdp->acm_deviceid_start = 0;
703 		acpi_ivmdp->acm_deviceid_end = (uint16_t)-1;
704 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_ALL;
705 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
706 		break;
707 	case 0x21:
708 		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
709 		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_deviceid;
710 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_SELECT;
711 		idx = deviceid_hashfn(ivmdp->ivmd_deviceid);
712 		break;
713 	case 0x22:
714 		acpi_ivmdp->acm_deviceid_start = ivmdp->ivmd_deviceid;
715 		acpi_ivmdp->acm_deviceid_end = ivmdp->ivmd_auxdata;
716 		acpi_ivmdp->acm_dev_type = IVMD_DEVICEID_RANGE;
717 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
718 		break;
719 	default:
720 		cmn_err(CE_PANIC, "Unknown AMD IOMMU ACPI IVMD deviceid type: "
721 		    "%x", ivmdp->ivmd_type);
722 		/*NOTREACHED*/
723 	}
724 
725 	ASSERT(AMD_IOMMU_REG_GET8(&uint8_flags,
726 	    AMD_IOMMU_ACPI_IVMD_RSV) == 0);
727 
728 	acpi_ivmdp->acm_ExclRange = AMD_IOMMU_REG_GET8(&uint8_flags,
729 	    AMD_IOMMU_ACPI_IVMD_EXCL_RANGE);
730 	acpi_ivmdp->acm_IW = AMD_IOMMU_REG_GET8(&uint8_flags,
731 	    AMD_IOMMU_ACPI_IVMD_IW);
732 	acpi_ivmdp->acm_IR = AMD_IOMMU_REG_GET8(&uint8_flags,
733 	    AMD_IOMMU_ACPI_IVMD_IR);
734 	acpi_ivmdp->acm_Unity = AMD_IOMMU_REG_GET8(&uint8_flags,
735 	    AMD_IOMMU_ACPI_IVMD_UNITY);
736 
737 	acpi_ivmdp->acm_ivmd_phys_start = ivmdp->ivmd_phys_start;
738 	acpi_ivmdp->acm_ivmd_phys_len = ivmdp->ivmd_phys_len;
739 
740 	acpi_ivmdp->acm_next = hash[idx];
741 	hash[idx] = acpi_ivmdp;
742 }
743 
744 static void
745 add_ivmdc_info(ivmd_container_t *ivmdcp, amd_iommu_acpi_ivmd_t **hash)
746 {
747 	set_ivmd_info(ivmdcp->ivmdc_ivmd, hash);
748 }
749 
750 static void
751 add_ivmd_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_ivmd_t **hash)
752 {
753 	ivmd_container_t *ivmdcp;
754 
755 	for (ivmdcp = acpi->acp_first_ivmdc; ivmdcp;
756 	    ivmdcp = ivmdcp->ivmdc_next) {
757 		add_ivmdc_info(ivmdcp, hash);
758 	}
759 }
760 
761 static void
762 add_global_info(amd_iommu_acpi_t *acpi, amd_iommu_acpi_global_t *global)
763 {
764 	uint32_t ivrs_ivinfo = acpi->acp_ivrs->ivrs_ivinfo;
765 
766 	global->acg_HtAtsResv =
767 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_HT_ATSRSV);
768 	global->acg_VAsize =
769 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_VA_SIZE);
770 	global->acg_PAsize =
771 	    AMD_IOMMU_REG_GET32(&ivrs_ivinfo, AMD_IOMMU_ACPI_PA_SIZE);
772 }
773 
774 static int
775 create_acpi_hash(amd_iommu_acpi_t *acpi)
776 {
777 	/* Last hash entry is for deviceid ranges including "all" */
778 
779 	amd_iommu_acpi_global = kmem_zalloc(sizeof (amd_iommu_acpi_global_t),
780 	    KM_SLEEP);
781 
782 	amd_iommu_acpi_ivhd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivhd_t *)
783 	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
784 
785 	amd_iommu_acpi_ivmd_hash = kmem_zalloc(sizeof (amd_iommu_acpi_ivmd_t *)
786 	    * (AMD_IOMMU_ACPI_INFO_HASH_SZ + 1), KM_SLEEP);
787 
788 	add_global_info(acpi, amd_iommu_acpi_global);
789 
790 	add_ivhd_info(acpi, amd_iommu_acpi_ivhd_hash);
791 
792 	add_ivmd_info(acpi, amd_iommu_acpi_ivmd_hash);
793 
794 	return (DDI_SUCCESS);
795 }
796 
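/*
 * Apply IVHD settings to one device table entry. At present only the
 * SysMgt bits are propagated into the second 64-bit word of the DTE.
 */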
797 static void
798 set_deventry(amd_iommu_t *iommu, int entry, amd_iommu_acpi_ivhd_t *hinfop)
799 {
800 	uint64_t *dentry;
801 
802 	dentry = (uint64_t *)(intptr_t)
803 	    &iommu->aiomt_devtbl[entry * AMD_IOMMU_DEVTBL_ENTRY_SZ];
804 
805 	AMD_IOMMU_REG_SET64(&(dentry[1]), AMD_IOMMU_DEVTBL_SYSMGT,
806 	    hinfop->ach_SysMgt);
807 }
808 
809 /* Initialize device table according to IVHD */
810 int
811 amd_iommu_acpi_init_devtbl(amd_iommu_t *iommu)
812 {
813 	int i, j;
814 	amd_iommu_acpi_ivhd_t *hinfop;
815 
816 	for (i = 0; i <= AMD_IOMMU_ACPI_INFO_HASH_SZ; i++) {
817 		for (hinfop = amd_iommu_acpi_ivhd_hash[i];
818 		    hinfop; hinfop = hinfop->ach_next) {
819 
820 			if (hinfop->ach_IOMMU_deviceid != iommu->aiomt_bdf)
821 				continue;
822 
823 			switch (hinfop->ach_dev_type) {
824 			case DEVENTRY_ALL:
825 				for (j = 0; j < AMD_IOMMU_MAX_DEVICEID; j++)
826 					set_deventry(iommu, j, hinfop);
827 				break;
828 			case DEVENTRY_SELECT:
829 			case DEVENTRY_EXTENDED_SELECT:
830 				set_deventry(iommu,
831 				    hinfop->ach_deviceid_start,
832 				    hinfop);
833 				break;
834 			case DEVENTRY_RANGE:
835 			case DEVENTRY_EXTENDED_RANGE:
836 				for (j = hinfop->ach_deviceid_start;
837 				    j <= hinfop->ach_deviceid_end;
838 				    j++)
839 					set_deventry(iommu, j, hinfop);
840 				break;
841 			case DEVENTRY_ALIAS_SELECT:
842 			case DEVENTRY_ALIAS_RANGE:
843 			case DEVENTRY_SPECIAL_DEVICE:
844 				set_deventry(iommu,
845 				    hinfop->ach_src_deviceid,
846 				    hinfop);
847 				break;
848 			default:
849 				cmn_err(CE_WARN,
850 				    "%s: Unknown deventry type",
851 				    amd_iommu_modname);
852 				return (DDI_FAILURE);
853 			}
854 		}
855 	}
856 
857 	return (DDI_SUCCESS);
858 }
859 
860 amd_iommu_acpi_global_t *
861 amd_iommu_lookup_acpi_global(void)
862 {
863 	ASSERT(amd_iommu_acpi_global);
864 
865 	return (amd_iommu_acpi_global);
866 }
867 
868 amd_iommu_acpi_ivhd_t *
869 amd_iommu_lookup_all_ivhd(void)
870 {
871 	amd_iommu_acpi_ivhd_t *hinfop;
872 
873 	hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
874 	for (; hinfop; hinfop = hinfop->ach_next) {
875 		if (hinfop->ach_deviceid_start == 0 &&
876 		    hinfop->ach_deviceid_end == (uint16_t)-1) {
877 			break;
878 		}
879 	}
880 
881 	return (hinfop);
882 }
883 
884 amd_iommu_acpi_ivmd_t *
885 amd_iommu_lookup_all_ivmd(void)
886 {
887 	amd_iommu_acpi_ivmd_t *minfop;
888 
889 	minfop = amd_iommu_acpi_ivmd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
890 	for (; minfop; minfop = minfop->acm_next) {
891 		if (minfop->acm_deviceid_start == 0 &&
892 		    minfop->acm_deviceid_end == (uint16_t)-1) {
893 			break;
894 		}
895 	}
896 
897 	return (minfop);
898 }
899 
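/*
 * Return some IVHD entry associated with this IOMMU. Only the head of
 * each hash bucket is examined, scanning from the range/"all" bucket
 * down; if no head matches the IOMMU's BDF, the head of bucket 0
 * (possibly NULL) is returned.
 */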
900 amd_iommu_acpi_ivhd_t *
901 amd_iommu_lookup_any_ivhd(amd_iommu_t *iommu)
902 {
903 	int i;
904 	amd_iommu_acpi_ivhd_t *hinfop;
905 
906 	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
907 		hinfop = amd_iommu_acpi_ivhd_hash[i];
908 		if ((hinfop != NULL) &&
909 		    hinfop->ach_IOMMU_deviceid == iommu->aiomt_bdf)
910 			break;
911 	}
912 
913 	return (hinfop);
914 }
915 
916 amd_iommu_acpi_ivmd_t *
917 amd_iommu_lookup_any_ivmd(void)
918 {
919 	int i;
920 	amd_iommu_acpi_ivmd_t *minfop;
921 
922 	for (i = AMD_IOMMU_ACPI_INFO_HASH_SZ; i >= 0; i--) {
923 		if ((minfop = amd_iommu_acpi_ivmd_hash[i]) != NULL)
924 			break;
925 	}
926 
927 	return (minfop);
928 }
929 
930 static void
931 dump_acpi_aliases(void)
932 {
933 	amd_iommu_acpi_ivhd_t *hinfop;
934 	uint16_t idx;
935 
936 	for (idx = 0; idx <= AMD_IOMMU_ACPI_INFO_HASH_SZ; idx++) {
937 		hinfop = amd_iommu_acpi_ivhd_hash[idx];
938 		for (; hinfop; hinfop = hinfop->ach_next) {
939 			cmn_err(CE_NOTE, "start=%d, end=%d, src_bdf=%d",
940 			    hinfop->ach_deviceid_start,
941 			    hinfop->ach_deviceid_end,
942 			    hinfop->ach_src_deviceid);
943 		}
944 	}
945 }
946 
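/*
 * Look up the IVHD settings for a deviceid. A deviceid of -1 selects
 * the special-device entry, if any. Otherwise the bucket selected by
 * deviceid_hashfn() is searched for an entry whose range covers the
 * deviceid, with the range/"all" bucket searched as a fallback.
 *
 * Illustrative use (hypothetical caller, not from this file):
 *
 *	amd_iommu_acpi_ivhd_t *hinfop = amd_iommu_lookup_ivhd(deviceid);
 *	if (hinfop != NULL)
 *		apply hinfop->ach_SysMgt, hinfop->ach_Lint0Pass, etc.
 */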
947 amd_iommu_acpi_ivhd_t *
948 amd_iommu_lookup_ivhd(int32_t deviceid)
949 {
950 	amd_iommu_acpi_ivhd_t *hinfop;
951 	uint16_t idx;
952 
953 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
954 		cmn_err(CE_NOTE, "Attempting to get ACPI IVHD info "
955 		    "for deviceid: %d", deviceid);
956 	}
957 
958 	ASSERT(amd_iommu_acpi_ivhd_hash);
959 
960 	/* check if special device */
961 	if (deviceid == -1) {
962 		hinfop = amd_iommu_acpi_ivhd_hash[AMD_IOMMU_ACPI_INFO_HASH_SZ];
963 		for (; hinfop; hinfop = hinfop->ach_next) {
964 			if (hinfop->ach_deviceid_start  == -1 &&
965 			    hinfop->ach_deviceid_end == -1) {
966 				break;
967 			}
968 		}
969 		return (hinfop);
970 	}
971 
972 	/* First search for an exact match */
973 
974 	idx = deviceid_hashfn(deviceid);
975 
976 
977 range:
978 	hinfop = amd_iommu_acpi_ivhd_hash[idx];
979 
980 	for (; hinfop; hinfop = hinfop->ach_next) {
981 		if (deviceid < hinfop->ach_deviceid_start ||
982 		    deviceid > hinfop->ach_deviceid_end)
983 			continue;
984 
985 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
986 			cmn_err(CE_NOTE, "Found ACPI IVHD match: %p, "
987 			    "actual deviceid = %u, start = %u, end = %u",
988 			    (void *)hinfop, deviceid,
989 			    hinfop->ach_deviceid_start,
990 			    hinfop->ach_deviceid_end);
991 		}
992 		goto out;
993 	}
994 
995 	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
996 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
997 		goto range;
998 	}
999 
1000 out:
1001 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1002 		cmn_err(CE_NOTE, "%u: %s ACPI IVHD %p", deviceid,
1003 		    hinfop ? "GOT" : "Did NOT get", (void *)hinfop);
1004 	}
1005 
1006 	return (hinfop);
1007 }
1008 
1009 amd_iommu_acpi_ivmd_t *
1010 amd_iommu_lookup_ivmd(int32_t deviceid)
1011 {
1012 	amd_iommu_acpi_ivmd_t *minfop;
1013 	uint16_t idx;
1014 
1015 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1016 		cmn_err(CE_NOTE, "Attempting to get ACPI IVMD info "
1017 		    "for deviceid: %u", deviceid);
1018 	}
1019 
1020 	ASSERT(amd_iommu_acpi_ivmd_hash);
1021 
1022 	/* First search for an exact match */
1023 
1024 	idx = deviceid_hashfn(deviceid);
1025 
1026 range:
1027 	minfop = amd_iommu_acpi_ivmd_hash[idx];
1028 
1029 	for (; minfop; minfop = minfop->acm_next) {
1030 		if (deviceid < minfop->acm_deviceid_start ||
1031 		    deviceid > minfop->acm_deviceid_end)
1032 			continue;
1033 
1034 		if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1035 			cmn_err(CE_NOTE, "Found ACPI IVMD match: %p, "
1036 			    "actual deviceid = %u, start = %u, end = %u",
1037 			    (void *)minfop, deviceid,
1038 			    minfop->acm_deviceid_start,
1039 			    minfop->acm_deviceid_end);
1040 		}
1041 
1042 		goto out;
1043 	}
1044 
1045 	if (idx !=  AMD_IOMMU_ACPI_INFO_HASH_SZ) {
1046 		idx = AMD_IOMMU_ACPI_INFO_HASH_SZ;
1047 		goto range;
1048 	}
1049 
1050 out:
1051 	if (amd_iommu_debug & AMD_IOMMU_DEBUG_ACPI) {
1052 		cmn_err(CE_NOTE, "%u: %s ACPI IVMD info %p", deviceid,
1053 		    minfop ? "GOT" : "Did NOT get", (void *)minfop);
1054 	}
1055 
1056 	return (minfop);
1057 }
1058