xref: /illumos-gate/usr/src/uts/i86pc/os/cmi_hw.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * CPU Module Interface - hardware abstraction.
29  */
30 
31 #ifdef __xpv
32 #include <sys/xpv_user.h>
33 #endif
34 
35 #include <sys/types.h>
36 #include <sys/cpu_module.h>
37 #include <sys/kmem.h>
38 #include <sys/x86_archext.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ksynch.h>
41 #include <sys/x_call.h>
42 #include <sys/pghw.h>
43 #include <sys/pci_cfgspace.h>
44 #include <sys/archsystm.h>
45 #include <sys/ontrap.h>
46 #include <sys/controlregs.h>
47 #include <sys/sunddi.h>
48 #include <sys/trap.h>
49 #include <sys/mca_x86.h>
50 #include <sys/processor.h>
51 #include <sys/cmn_err.h>
52 #include <sys/nvpair.h>
53 #include <sys/fm/util.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/smb/fmsmb.h>
56 #include <sys/cpu_module_impl.h>
57 
58 /*
59  * Variable which determines if the SMBIOS supports x86 generic topology; or
60  * if legacy topolgy enumeration will occur.
61  */
62 extern int x86gentopo_legacy;
63 
/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync when making changes.
 *
 * Most fields are filled in when the handle is created; cmih_flags
 * is also updated at runtime (see cmi_hdl_inj_begin/cmi_hdl_inj_end).
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;
91 
92 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
93 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
94 
95 #define	CMIH_F_INJACTV		0x1ULL
96 
/*
 * Ops structure for handle operations.  Two implementations appear
 * later in this file - native (bare metal) and xVM dom0 - selected
 * at compile time via __xpv.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.  They report
	 * cpu identity (vendor/family/model/stepping), topology
	 * (chip/node/core/strand), and chip revision/socket data.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation: cr4 access,
	 * MSR read/write/interposition, interrupt injection and
	 * online/offline control.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};
135 
136 static const struct cmi_hdl_ops cmi_hdl_ops;
137 
138 /*
139  * Handles are looked up from contexts such as polling, injection etc
140  * where the context is reasonably well defined (although a poller could
141  * interrupt any old thread holding any old lock).  They are also looked
142  * up by machine check handlers, which may strike at inconvenient times
143  * such as during handle initialization or destruction or during handle
144  * lookup (which the #MC handler itself will also have to perform).
145  *
146  * So keeping handles in a linked list makes locking difficult when we
147  * consider #MC handlers.  Our solution is to have a look-up table indexed
148  * by that which uniquely identifies a handle - chip/core/strand id -
149  * with each entry a structure including a pointer to a handle
150  * structure for the resource, and a reference count for the handle.
151  * Reference counts are modified atomically.  The public cmi_hdl_hold
152  * always succeeds because this can only be used after handle creation
153  * and before the call to destruct, so the hold count is already at least one.
154  * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already been decremented to zero
156  * before applying our hold.
157  *
158  * The table is an array of maximum number of chips defined in
159  * CMI_CHIPID_ARR_SZ indexed by the chip id. If the chip is not present, the
160  * entry is NULL. Each entry is a pointer to another array which contains a
161  * list of all strands of the chip. This first level table is allocated when
162  * first we want to populate an entry. The size of the latter (per chip) table
163  * is CMI_MAX_STRANDS_PER_CHIP and it is populated when one of its cpus starts.
164  *
165  * Ideally we should only allocate to the actual number of chips, cores per
166  * chip and strand per core. The number of chips is not available until all
167  * of them are passed. The number of cores and strands are partially available.
168  * For now we stick with the above approach.
169  */
#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

#define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP		(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
#define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)
#define	CMI_MAX_STRANDS_PER_CHIP	(CMI_MAX_CORES_PER_CHIP * \
					    CMI_MAX_STRANDS_PER_CORE)

/*
 * Handle array indexing within a per-chip table of
 * CMI_MAX_STRANDS_PER_CHIP (16 * 8 = 128) entries:
 *	[6:3] = Core in package,
 *	[2:0] = Strand in core,
 */
#define	CMI_HDL_ARR_IDX_CORE(coreid) \
	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
	CMI_MAX_STRANDS_PER_CORE_NBITS)

#define	CMI_HDL_ARR_IDX_STRAND(strandid) \
	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))

#define	CMI_HDL_ARR_IDX(coreid, strandid) \
	(CMI_HDL_ARR_IDX_CORE(coreid) | CMI_HDL_ARR_IDX_STRAND(strandid))

/* First-level table size: one slot per possible chipid */
#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
196 
/*
 * Per-strand entry in a per-chip handle table.  cmae_refcnt is
 * modified atomically (see the lookup-table comment above) and
 * cmae_hdlp points at the strand's handle structure.
 */
typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

/* First-level table, indexed by chipid; NULL until the chip is seen */
static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
203 
/*
 * Controls where we will source PCI config space data: reads and
 * writes may independently be allowed to touch real hardware and/or
 * the interposition hash.  All four are permitted by default.
 * (Hex constants normalized to lowercase "0x" for consistency.)
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
215 
/*
 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc.
 * When RD_HWOK/WR_HWOK is clear, ntv_rdmsr/ntv_wrmsr skip real MSR access.
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008

/* Number of xc_priority attempts call_func_ntv() makes before giving up */
int cmi_call_func_ntv_tries = 3;
225 
/*
 * Execute "func" on the target cpu.  If we are already on that cpu
 * just call it directly; otherwise attempt a high-priority cross call
 * up to cmi_call_func_ntv_tries times.  The callee reports its result
 * through the third xc_arg_t, which is preset to the sentinel -1 so
 * we can tell whether it ever ran; CMIERR_DEADLOCK is returned if not.
 */
static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;	/* sentinel: "func has not run" */
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We cross call with xc_priority - we can't guarantee
		 * success with xc_call because the interrupt code in
		 * the case of a #MC may already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
262 
263 static uint64_t injcnt;
264 
265 void
266 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
267 {
268 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
269 
270 	if (hdl != NULL)
271 		hdl->cmih_flags |= CMIH_F_INJACTV;
272 	if (injcnt++ == 0) {
273 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
274 		    "activity noted");
275 	}
276 }
277 
278 void
279 cmi_hdl_inj_end(cmi_hdl_t ophdl)
280 {
281 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
282 
283 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
284 	if (hdl != NULL)
285 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
286 }
287 
288 boolean_t
289 cmi_inj_tainted(void)
290 {
291 	return (injcnt != 0 ? B_TRUE : B_FALSE);
292 }
293 
294 /*
295  *	 =======================================================
296  *	|	MSR Interposition				|
297  *	|	-----------------				|
298  *	|							|
299  *	 -------------------------------------------------------
300  */
301 
#define	CMI_MSRI_HASHSZ		16
/*
 * Bucket index for an interposed MSR entry: the handle address (low
 * three bits dropped) plus the MSR number, reduced modulo the table
 * size.  The original expression shifted the handle right by
 * (3 + msr) due to operator precedence ('+' binds tighter than '>>'),
 * which for real MSR numbers is a shift of 64 or more bits - an
 * out-of-range, undefined shift - and also reduced modulo
 * (CMI_MSRI_HASHSZ - 1), leaving one bucket permanently unused.
 * All users (msri_addent/msri_lookup/msri_rment) share this macro,
 * so correcting it preserves the interposition behavior.
 */
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)
305 
/* Hash bucket: a lock plus a doubly-linked list of interposed entries */
struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

/* One interposed MSR value, keyed by <handle, msr number> */
struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

/* True iff entry "ent" records msr "req_msr" for handle "hdl" */
#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
323 
324 static void
325 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
326 {
327 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
328 	struct cmi_msri_bkt *hbp = &msrihash[idx];
329 	struct cmi_msri_hashent *hep;
330 
331 	mutex_enter(&hbp->msrib_lock);
332 
333 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
334 		if (CMI_MSRI_MATCH(hep, hdl, msr))
335 			break;
336 	}
337 
338 	if (hep != NULL) {
339 		hep->msrie_msrval = val;
340 	} else {
341 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
342 		hep->msrie_hdl = hdl;
343 		hep->msrie_msrnum = msr;
344 		hep->msrie_msrval = val;
345 
346 		if (hbp->msrib_head != NULL)
347 			hbp->msrib_head->msrie_prev = hep;
348 		hep->msrie_next = hbp->msrib_head;
349 		hep->msrie_prev = NULL;
350 		hbp->msrib_head = hep;
351 	}
352 
353 	mutex_exit(&hbp->msrib_lock);
354 }
355 
356 /*
357  * Look for a match for the given hanlde and msr.  Return 1 with valp
358  * filled if a match is found, otherwise return 0 with valp untouched.
359  */
360 static int
361 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
362 {
363 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
364 	struct cmi_msri_bkt *hbp = &msrihash[idx];
365 	struct cmi_msri_hashent *hep;
366 
367 	/*
368 	 * This function is called during #MC trap handling, so we should
369 	 * consider the possibility that the hash mutex is held by the
370 	 * interrupted thread.  This should not happen because interposition
371 	 * is an artificial injection mechanism and the #MC is requested
372 	 * after adding entries, but just in case of a real #MC at an
373 	 * unlucky moment we'll use mutex_tryenter here.
374 	 */
375 	if (!mutex_tryenter(&hbp->msrib_lock))
376 		return (0);
377 
378 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
379 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
380 			*valp = hep->msrie_msrval;
381 			break;
382 		}
383 	}
384 
385 	mutex_exit(&hbp->msrib_lock);
386 
387 	return (hep != NULL);
388 }
389 
390 /*
391  * Remove any interposed value that matches.
392  */
393 static void
394 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
395 {
396 
397 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
398 	struct cmi_msri_bkt *hbp = &msrihash[idx];
399 	struct cmi_msri_hashent *hep;
400 
401 	if (!mutex_tryenter(&hbp->msrib_lock))
402 		return;
403 
404 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
405 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
406 			if (hep->msrie_prev != NULL)
407 				hep->msrie_prev->msrie_next = hep->msrie_next;
408 
409 			if (hep->msrie_next != NULL)
410 				hep->msrie_next->msrie_prev = hep->msrie_prev;
411 
412 			if (hbp->msrib_head == hep)
413 				hbp->msrib_head = hep->msrie_next;
414 
415 			kmem_free(hep, sizeof (*hep));
416 			break;
417 		}
418 	}
419 
420 	mutex_exit(&hbp->msrib_lock);
421 }
422 
423 /*
424  *	 =======================================================
425  *	|	PCI Config Space Interposition			|
426  *	|	------------------------------			|
427  *	|							|
428  *	 -------------------------------------------------------
429  */
430 
431 /*
432  * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
433  * and then record whether the value stashed was made with a byte, word or
434  * doubleword access;  we will only return a hit for an access of the
435  * same size.  If you access say a 32-bit register using byte accesses
436  * and then attempt to read the full 32-bit value back you will not obtain
437  * any sort of merged result - you get a lookup miss.
438  */
439 
#define	CMI_PCII_HASHSZ		16
/*
 * Simple additive hash on the config-space address.  Reduced modulo
 * the full table size (the original used CMI_PCII_HASHSZ - 1, which
 * left the last bucket permanently unused); all users share this
 * macro so bucket selection stays consistent.
 */
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)
443 
/* Hash bucket: a lock plus a doubly-linked list of interposed entries */
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

/*
 * One interposed config-space value, keyed by bus/dev/func/register
 * and by the access size with which it was stashed.
 */
struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

/* True iff "ent" records <b,d,f,r> stashed with access size "asz" */
#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
466 
467 
468 /*
469  * Add a new entry to the PCI interpose hash, overwriting any existing
470  * entry that is found.
471  */
472 static void
473 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
474 {
475 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
476 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
477 	struct cmi_pcii_hashent *hep;
478 
479 	cmi_hdl_inj_begin(NULL);
480 
481 	mutex_enter(&hbp->pciib_lock);
482 
483 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
484 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
485 			break;
486 	}
487 
488 	if (hep != NULL) {
489 		hep->pcii_val = val;
490 	} else {
491 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
492 		hep->pcii_bus = bus;
493 		hep->pcii_dev = dev;
494 		hep->pcii_func = func;
495 		hep->pcii_reg = reg;
496 		hep->pcii_asize = asz;
497 		hep->pcii_val = val;
498 
499 		if (hbp->pciib_head != NULL)
500 			hbp->pciib_head->pcii_prev = hep;
501 		hep->pcii_next = hbp->pciib_head;
502 		hep->pcii_prev = NULL;
503 		hbp->pciib_head = hep;
504 	}
505 
506 	mutex_exit(&hbp->pciib_lock);
507 
508 	cmi_hdl_inj_end(NULL);
509 }
510 
511 /*
512  * Look for a match for the given bus/dev/func/reg; return 1 with valp
513  * filled if a match is found, otherwise return 0 with valp untouched.
514  */
515 static int
516 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
517 {
518 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
519 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
520 	struct cmi_pcii_hashent *hep;
521 
522 	if (!mutex_tryenter(&hbp->pciib_lock))
523 		return (0);
524 
525 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
526 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
527 			*valp = hep->pcii_val;
528 			break;
529 		}
530 	}
531 
532 	mutex_exit(&hbp->pciib_lock);
533 
534 	return (hep != NULL);
535 }
536 
537 static void
538 pcii_rment(int bus, int dev, int func, int reg, int asz)
539 {
540 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
541 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
542 	struct cmi_pcii_hashent *hep;
543 
544 	mutex_enter(&hbp->pciib_lock);
545 
546 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
547 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
548 			if (hep->pcii_prev != NULL)
549 				hep->pcii_prev->pcii_next = hep->pcii_next;
550 
551 			if (hep->pcii_next != NULL)
552 				hep->pcii_next->pcii_prev = hep->pcii_prev;
553 
554 			if (hbp->pciib_head == hep)
555 				hbp->pciib_head = hep->pcii_next;
556 
557 			kmem_free(hep, sizeof (*hep));
558 			break;
559 		}
560 	}
561 
562 	mutex_exit(&hbp->pciib_lock);
563 }
564 
565 #ifndef __xpv
566 
567 /*
568  *	 =======================================================
569  *	|	Native methods					|
570  *	|	--------------					|
571  *	|							|
572  *	| These are used when we are running native on bare-	|
573  *	| metal, or simply don't know any better.		|
574  *	---------------------------------------------------------
575  */
576 
#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)

/*
 * Identity accessors.  Those that derive their answer from cpuid state
 * query the underlying cpu_t via HDLPRIV(); the rest return values
 * stashed in the handle itself.
 */

static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);

}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}
693 
/*
 * cr4 access.  The register must be read or written on the target cpu,
 * so the _xc variants run there via call_func_ntv(); the wrappers just
 * marshal arguments and results.
 */

/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
}
738 
/* Count of MSR reads that took a #GP; incremented for observability */
volatile uint32_t cmi_trapped_rdmsr;

/*
 * Cross-call target: read an MSR on the current cpu under on_trap()
 * protection.  A trapping access is counted and reported as
 * CMIERR_MSRGPF; an unknown MSR is CMIERR_NOTSUP.
 */
/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		/* The rdmsr raised #GP and on_trap caught it */
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}

/*
 * Read an MSR on the cpu backing this handle.  If hardware reads are
 * disabled for the handle we return CMIERR_INTERPOSE (callers
 * presumably fall back to any interposed value - confirm at call site).
 */
static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}
776 
/* Count of MSR writes that took a #GP; incremented for observability */
volatile uint32_t cmi_trapped_wrmsr;

/*
 * Cross-call target: write an MSR on the current cpu under on_trap()
 * protection.  The 64-bit value arrives by reference through arg2.
 */
/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		/* The wrmsr raised #GP and on_trap caught it */
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);

}

/*
 * Write an MSR on the cpu backing this handle.  If hardware writes
 * are disabled for this handle the write is silently dropped and
 * CMI_SUCCESS is returned.
 */
static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}
814 
/*
 * Interpose a value for <hdl, msr>: subsequent msri_lookup calls will
 * see "val".  Always succeeds.
 */
static cmi_errno_t
ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	msri_addent(hdl, msr, val);
	return (CMI_SUCCESS);
}

/*
 * Cross-call target: raise #MC (int18) or the CMCI interrupt on the
 * current cpu.
 */
/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;

	return (0);
}

/* Raise interrupt int_no on the cpu backing this handle */
static void
ntv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
}

/* Change the online/offline state of the cpu backing this handle */
static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	return (p_online_internal(cpuid, new_status, old_status));
}
853 
854 #else	/* __xpv */
855 
856 /*
857  *	 =======================================================
858  *	|	xVM dom0 methods				|
859  *	|	----------------				|
860  *	|							|
861  *	| These are used when we are running as dom0 in		|
862  *	| a Solaris xVM context.				|
863  *	---------------------------------------------------------
864  */
865 
#define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)

extern uint_t _cpuid_vendorstr_to_vendorcode(char *);

/*
 * Identity accessors for the xVM dom0 case.  Anything derived from
 * cpuid state comes via the hypervisor's physical-cpu cookie; the
 * rest comes straight from the handle.
 */

static uint_t
xpv_vendor(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
	    HDLPRIV(hdl))));
}

static const char *
xpv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
}

static uint_t
xpv_family(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_family(HDLPRIV(hdl)));
}

static uint_t
xpv_model(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_model(HDLPRIV(hdl)));
}

static uint_t
xpv_stepping(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_stepping(HDLPRIV(hdl)));
}

static uint_t
xpv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
xpv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
xpv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
xpv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
xpv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
}

static uint16_t
xpv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
xpv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
xpv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

static id_t
xpv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
}
997 
/*
 * Under xVM we have no direct MSR access; the only value we can
 * supply is the MCG_CAP recorded for the physical cpu
 * (xen_physcpu_mcg_cap).  All other MSRs report CMIERR_NOTSUP.
 */
static cmi_errno_t
xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	switch (msr) {
	case IA32_MSR_MCG_CAP:
		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
		break;

	default:
		return (CMIERR_NOTSUP);
	}

	return (CMI_SUCCESS);
}
1012 
/*
 * Request the hypervisor to write an MSR for us.  The hypervisor
 * will only accept MCA-related MSRs, as this is for MCA error
 * simulation purposes alone.  We will pre-screen MSRs for injection
 * so we don't bother the HV with bogus requests.  We will permit
 * injection to any MCA bank register, and to MCG_STATUS.
 */

/* MSRs we will hand to the hypervisor: MC banks 0-10 and MCG_STATUS */
#define	IS_MCA_INJ_MSR(msr) \
	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
	(msr) == IA32_MSR_MCG_STATUS)

/*
 * Common write path for real and interposed MSR writes.  Only
 * permitted while injection is active on the handle, only for
 * screened MSRs, and not in panic context (CMIERR_DEADLOCK).
 */
static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	xen_mc_t xmc;
	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);		/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci->mcinj_count = 1;	/* learn to batch sometime */
	mci->mcinj_msr[0].reg = msr;
	mci->mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
}
1049 
/* Real (non-interpose) MSR write via the hypervisor */
static cmi_errno_t
xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
}


/*
 * Interposed MSR write: passes MC_MSRINJ_F_INTERPOSE so the
 * hypervisor treats the value as an interposition.
 */
static cmi_errno_t
xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
}
1062 
/*
 * Ask the hypervisor to inject a machine-check on the physical cpu
 * backing this handle.  Only honored while injection is active.
 *
 * NOTE(review): for int_no != T_MCE we warn that the request is
 * unimplemented but still fall through and perform the #MC injection
 * below - confirm whether an early return was intended here.
 */
static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	xen_mc_t xmc;
	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
	}

	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
}
1081 
/*
 * Implement online/offline control via Xen's cpu hotplug sysctl.
 * P_FORCED is stripped; P_STATUS queries, P_FAULTED/P_OFFLINE take
 * the cpu offline, and P_ONLINE brings it online.  On success the
 * previous Xen status is translated back to a P_* value in
 * *old_status.  Returns 0 on success, the negated hypercall error on
 * failure, or -1 for an unsupported request or unrecognized status.
 */
static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		/* A non-negative hypercall result is the prior status */
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}
1130 
1131 #endif
1132 
/*
 * Locate the per-cpu private datum for the cpu identified by the
 * <chipid, coreid, strandid> tuple: a xen_mc_lcpu_cookie_t when running
 * as an xVM dom0, otherwise the native cpu_t pointer.  Returns NULL if
 * no matching cpu is found.  The class argument is unused.
 */
/*ARGSUSED*/
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	/* Walk the hypervisor-provided list of physical cpus. */
	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}
	return (NULL);

#else	/* __xpv */

	cpu_t *cp, *startcp;

	/*
	 * Walk the circular cpu_next list starting from the current cpu,
	 * with preemption disabled for the traversal.  NOTE(review): the
	 * returned cpu_t is used after kpreempt_enable(); presumably the
	 * caller's context keeps it valid — confirm against callers.
	 */
	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
#endif	/* __xpv */
}
1170 
1171 static boolean_t
1172 cpu_is_cmt(void *priv)
1173 {
1174 #ifdef __xpv
1175 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1176 #else /* __xpv */
1177 	cpu_t *cp = (cpu_t *)priv;
1178 
1179 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1180 	    cpuid_get_ncore_per_chip(cp);
1181 
1182 	return (strands_per_core > 1);
1183 #endif /* __xpv */
1184 }
1185 
1186 /*
1187  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1188  * tuple.
1189  */
/*
 * Find the handle entry of a given cpu identified by a <chip,core,strand>
 * tuple.  The per-chip entry table is allocated lazily, and installation
 * is lock-free: losers of the atomic_cas_ptr race free their copy.
 */
static cmi_hdl_ent_t *
cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
{
	/*
	 * Allocate per-chip table which contains a list of handle of
	 * all strands of the chip.
	 */
	if (cmi_chip_tab[chipid] == NULL) {
		size_t sz;
		cmi_hdl_ent_t *pg;

		sz = CMI_MAX_STRANDS_PER_CHIP * sizeof (cmi_hdl_ent_t);
		pg = kmem_zalloc(sz, KM_SLEEP);

		/* test and set the per-chip table if it is not allocated */
		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
			kmem_free(pg, sz); /* someone beat us */
	}

	return (cmi_chip_tab[chipid] + CMI_HDL_ARR_IDX(coreid, strandid));
}
1211 
/*
 * Create and register a handle for the cpu identified by the
 * <chipid, coreid, strandid> tuple.  Returns NULL if the tuple is out
 * of range, no such cpu can be found, or a handle for this tuple has
 * already been allocated.  On success the handle is returned with its
 * initial reference held (dropped later by cmi_hdl_rele at cpu
 * deconfigure, or on cmi initialization failure).
 */
cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;

	/* Only the platform's native handle class may be created. */
#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	/* Under xVM, MSR access from dom0 is interpose-only. */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else   /* __xpv */
	/* Natively both hardware and interposed MSR access are allowed. */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif  /* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * callers logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note the
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
1294 
/*
 * Read SMBIOS-derived topology information for this cpu — chip instance,
 * SMBIOS record id and base-board data — keyed by the strand's apic id,
 * and cache the results in the handle.  If the SMBIOS queries fail, topo
 * enumeration reverts to legacy mode and the handle is left unchanged.
 */
void
cmi_read_smbios(cmi_hdl_t ophdl)
{

	uint_t strand_apicid;
	uint_t chip_inst;
	uint16_t smb_id;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard  = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi reads smbios base boards info failed");
#endif /* DEBUG */
	}
}
1339 
1340 void
1341 cmi_hdl_hold(cmi_hdl_t ophdl)
1342 {
1343 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1344 
1345 	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1346 
1347 	atomic_inc_32(hdl->cmih_refcntp);
1348 }
1349 
/*
 * Attempt to take a reference on a handle entry without holding any
 * lock.  Returns 1 with a reference held, or 0 if the entry's refcount
 * is (or concurrently becomes) zero, meaning the handle never existed
 * or is being / has been destroyed.
 */
static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		/* CAS succeeds only if the count is still what we saw. */
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}
1383 
1384 
/*
 * Drop a reference on a handle.  When the final reference is released
 * the handle is unhooked from its chip-table entry and freed; once the
 * refcount reaches zero cmi_hdl_canref will refuse new references, so
 * no new lookups can resurrect the handle.
 */
void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	ASSERT(*hdl->cmih_refcntp > 0);

	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
		return;

	/* Last reference gone: unhook from the entry table and free. */
	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
1402 
1403 void
1404 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1405 {
1406 	IMPLHDL(ophdl)->cmih_spec = arg;
1407 }
1408 
1409 void *
1410 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1411 {
1412 	return (IMPLHDL(ophdl)->cmih_spec);
1413 }
1414 
1415 void
1416 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1417 {
1418 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1419 
1420 	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1421 	hdl->cmih_mcops = mcops;
1422 	hdl->cmih_mcdata = mcdata;
1423 }
1424 
1425 const struct cmi_mc_ops *
1426 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1427 {
1428 	return (IMPLHDL(ophdl)->cmih_mcops);
1429 }
1430 
1431 void *
1432 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1433 {
1434 	return (IMPLHDL(ophdl)->cmih_mcdata);
1435 }
1436 
/*
 * Look up and take a reference on the handle of the cpu identified by
 * the <chipid, coreid, strandid> tuple.  CMI_HDL_NEUTRAL matches the
 * platform's native class.  Returns NULL if the tuple is out of range,
 * no handle exists, or the class does not match; otherwise the caller
 * must drop the reference with cmi_hdl_rele.
 */
cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	/* Wrong class: drop the reference we just took. */
	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}
1467 
1468 cmi_hdl_t
1469 cmi_hdl_any(void)
1470 {
1471 	int i, j;
1472 	cmi_hdl_ent_t *ent;
1473 
1474 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1475 		if (cmi_chip_tab[i] == NULL)
1476 			continue;
1477 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1478 		    j++, ent++) {
1479 			if (cmi_hdl_canref(ent))
1480 				return ((cmi_hdl_t)ent->cmae_hdlp);
1481 		}
1482 	}
1483 
1484 	return (NULL);
1485 }
1486 
1487 void
1488 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1489     void *arg1, void *arg2, void *arg3)
1490 {
1491 	int i, j;
1492 	cmi_hdl_ent_t *ent;
1493 
1494 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1495 		if (cmi_chip_tab[i] == NULL)
1496 			continue;
1497 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1498 		    j++, ent++) {
1499 			if (cmi_hdl_canref(ent)) {
1500 				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1501 				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1502 				    == CMI_HDL_WALK_DONE) {
1503 					cmi_hdl_rele((cmi_hdl_t)hdl);
1504 					return;
1505 				}
1506 				cmi_hdl_rele((cmi_hdl_t)hdl);
1507 			}
1508 		}
1509 	}
1510 }
1511 
1512 void
1513 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1514 {
1515 	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1516 	IMPLHDL(ophdl)->cmih_cmi = cmi;
1517 }
1518 
1519 void *
1520 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1521 {
1522 	return (IMPLHDL(ophdl)->cmih_cmi);
1523 }
1524 
1525 void *
1526 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1527 {
1528 	return (IMPLHDL(ophdl)->cmih_cmidata);
1529 }
1530 
1531 enum cmi_hdl_class
1532 cmi_hdl_class(cmi_hdl_t ophdl)
1533 {
1534 	return (IMPLHDL(ophdl)->cmih_class);
1535 }
1536 
/*
 * Generate a public accessor function for each simple handle op; each
 * merely dispatches through the handle's ops vector (cmi_hdl_ops).
 */
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1564 
1565 boolean_t
1566 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1567 {
1568 	return (IMPLHDL(ophdl)->cmih_mstrand);
1569 }
1570 
1571 void
1572 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1573 {
1574 	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1575 		return;
1576 
1577 	cmi_hdl_inj_begin(ophdl);
1578 	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1579 	cmi_hdl_inj_end(NULL);
1580 }
1581 
1582 int
1583 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1584 {
1585 	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1586 	    new_status, old_status));
1587 }
1588 
1589 #ifndef	__xpv
1590 /*
1591  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1592  */
1593 uint_t
1594 cmi_ntv_hwchipid(cpu_t *cp)
1595 {
1596 	return (cpuid_get_chipid(cp));
1597 }
1598 
1599 /*
1600  * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1601  */
1602 uint_t
1603 cmi_ntv_hwprocnodeid(cpu_t *cp)
1604 {
1605 	return (cpuid_get_procnodeid(cp));
1606 }
1607 
1608 /*
1609  * Return core instance within a single chip.
1610  */
1611 uint_t
1612 cmi_ntv_hwcoreid(cpu_t *cp)
1613 {
1614 	return (cpuid_get_pkgcoreid(cp));
1615 }
1616 
1617 /*
1618  * Return strand number within a single core.  cpuid_get_clogid numbers
1619  * all execution units (strands, or cores in unstranded models) sequentially
1620  * within a single chip.
1621  */
1622 uint_t
1623 cmi_ntv_hwstrandid(cpu_t *cp)
1624 {
1625 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1626 	    cpuid_get_ncore_per_chip(cp);
1627 
1628 	return (cpuid_get_clogid(cp) % strands_per_core);
1629 }
1630 #endif	/* __xpv */
1631 
1632 void
1633 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1634 {
1635 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1636 
1637 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1638 }
1639 
1640 void
1641 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1642 {
1643 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1644 
1645 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1646 }
1647 
/*
 * Read an MSR for this handle, preferring an interposed value (when
 * interposition is permitted) over the class rdmsr op.  Returns
 * CMIERR_NOTSUP if the class implements no rdmsr op.
 */
cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}
1669 
1670 cmi_errno_t
1671 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1672 {
1673 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1674 
1675 	/* Invalidate any interposed value */
1676 	msri_rment(hdl, msr);
1677 
1678 	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1679 		return (CMI_SUCCESS);	/* pretend all is ok */
1680 
1681 	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1682 }
1683 
1684 void
1685 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1686 {
1687 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1688 	ulong_t cr4;
1689 
1690 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1691 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1692 		return;
1693 
1694 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1695 
1696 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1697 }
1698 
1699 void
1700 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1701 {
1702 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1703 	int i;
1704 
1705 	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1706 		return;
1707 
1708 	cmi_hdl_inj_begin(ophdl);
1709 
1710 	for (i = 0; i < nregs; i++, regs++)
1711 		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1712 		    regs->cmr_msrval);
1713 
1714 	cmi_hdl_inj_end(ophdl);
1715 }
1716 
/*
 * Record MSR values forwarded to us (under xVM, from the hypervisor)
 * as interposed values so subsequent cmi_hdl_rdmsr calls observe them.
 * On native kernels there is no forwarding source, so this is a no-op.
 */
/*ARGSUSED*/
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
#ifdef __xpv
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
#endif
}
1729 
1730 
/*
 * Disallow PCI config reads from reaching real hardware; only
 * interposed values will be returned (otherwise reads yield 0 — see
 * cmi_pci_get_cmn).
 */
void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}
1736 
/*
 * Disallow PCI config writes from reaching real hardware.
 */
void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
1742 
/*
 * Common PCI config read of 'asz' bytes (1, 2 or 4) at bus/dev/func/reg.
 * An interposed value, when present and permitted, takes precedence and
 * sets *interpose to 1.  If hardware reads are disabled (or asz is
 * unrecognized) 0 is returned.  When a ddi access handle is supplied
 * the pci_config_get* routines are used; otherwise the raw
 * pci_get{b,w,l}_func entry points are called.
 */
static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = (*pci_getb_func)(bus, dev, func, reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = (*pci_getw_func)(bus, dev, func, reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = (*pci_getl_func)(bus, dev, func, reg);
		break;
	default:
		val = 0;
	}
	return (val);
}
1785 
1786 uint8_t
1787 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1788     ddi_acc_handle_t hdl)
1789 {
1790 	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1791 	    hdl));
1792 }
1793 
1794 uint16_t
1795 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1796     ddi_acc_handle_t hdl)
1797 {
1798 	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1799 	    hdl));
1800 }
1801 
1802 uint32_t
1803 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1804     ddi_acc_handle_t hdl)
1805 {
1806 	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1807 }
1808 
/*
 * Interpose an 8-bit value at the given PCI config location; it will be
 * returned by subsequent cmi_pci_getb calls in preference to hardware.
 */
void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}
1814 
/*
 * Interpose a 16-bit value at the given PCI config location.
 */
void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}
1820 
/*
 * Interpose a 32-bit value at the given PCI config location.
 */
void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
1826 
/*
 * Common PCI config write of 'asz' bytes (1, 2 or 4) at
 * bus/dev/func/reg.  Any interposed value for the location is first
 * invalidated; the hardware write is then suppressed if hardware writes
 * are disabled.  When a ddi access handle is supplied the
 * pci_config_put* routines are used; otherwise the raw
 * pci_put{b,w,l}_func entry points are called.
 */
static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			(*pci_putb_func)(bus, dev, func, reg, (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			(*pci_putw_func)(bus, dev, func, reg, (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			(*pci_putl_func)(bus, dev, func, reg, val);
		break;

	default:
		break;
	}
}
1865 
/*
 * Write an 8-bit PCI config value (invalidating any interposed value).
 */
void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}
1872 
/*
 * Write a 16-bit PCI config value (invalidating any interposed value).
 */
void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}
1879 
/*
 * Write a 32-bit PCI config value (invalidating any interposed value).
 */
void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}
1886 
1887 static const struct cmi_hdl_ops cmi_hdl_ops = {
1888 #ifdef __xpv
1889 	/*
1890 	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
1891 	 */
1892 	xpv_vendor,		/* cmio_vendor */
1893 	xpv_vendorstr,		/* cmio_vendorstr */
1894 	xpv_family,		/* cmio_family */
1895 	xpv_model,		/* cmio_model */
1896 	xpv_stepping,		/* cmio_stepping */
1897 	xpv_chipid,		/* cmio_chipid */
1898 	xpv_procnodeid,		/* cmio_procnodeid */
1899 	xpv_coreid,		/* cmio_coreid */
1900 	xpv_strandid,		/* cmio_strandid */
1901 	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
1902 	xpv_strand_apicid,	/* cmio_strand_apicid */
1903 	xpv_chiprev,		/* cmio_chiprev */
1904 	xpv_chiprevstr,		/* cmio_chiprevstr */
1905 	xpv_getsockettype,	/* cmio_getsockettype */
1906 	xpv_getsocketstr,	/* cmio_getsocketstr */
1907 	xpv_logical_id,		/* cmio_logical_id */
1908 	NULL,			/* cmio_getcr4 */
1909 	NULL,			/* cmio_setcr4 */
1910 	xpv_rdmsr,		/* cmio_rdmsr */
1911 	xpv_wrmsr,		/* cmio_wrmsr */
1912 	xpv_msrinterpose,	/* cmio_msrinterpose */
1913 	xpv_int,		/* cmio_int */
1914 	xpv_online,		/* cmio_online */
1915 	xpv_smbiosid,		/* cmio_smbiosid */
1916 	xpv_smb_chipid,		/* cmio_smb_chipid */
1917 	xpv_smb_bboard		/* cmio_smb_bboard */
1918 
1919 #else	/* __xpv */
1920 
1921 	/*
1922 	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
1923 	 */
1924 	ntv_vendor,		/* cmio_vendor */
1925 	ntv_vendorstr,		/* cmio_vendorstr */
1926 	ntv_family,		/* cmio_family */
1927 	ntv_model,		/* cmio_model */
1928 	ntv_stepping,		/* cmio_stepping */
1929 	ntv_chipid,		/* cmio_chipid */
1930 	ntv_procnodeid,		/* cmio_procnodeid */
1931 	ntv_coreid,		/* cmio_coreid */
1932 	ntv_strandid,		/* cmio_strandid */
1933 	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
1934 	ntv_strand_apicid,	/* cmio_strand_apicid */
1935 	ntv_chiprev,		/* cmio_chiprev */
1936 	ntv_chiprevstr,		/* cmio_chiprevstr */
1937 	ntv_getsockettype,	/* cmio_getsockettype */
1938 	ntv_getsocketstr,	/* cmio_getsocketstr */
1939 	ntv_logical_id,		/* cmio_logical_id */
1940 	ntv_getcr4,		/* cmio_getcr4 */
1941 	ntv_setcr4,		/* cmio_setcr4 */
1942 	ntv_rdmsr,		/* cmio_rdmsr */
1943 	ntv_wrmsr,		/* cmio_wrmsr */
1944 	ntv_msrinterpose,	/* cmio_msrinterpose */
1945 	ntv_int,		/* cmio_int */
1946 	ntv_online,		/* cmio_online */
1947 	ntv_smbiosid,		/* cmio_smbiosid */
1948 	ntv_smb_chipid,		/* cmio_smb_chipid */
1949 	ntv_smb_bboard		/* cmio_smb_bboard */
1950 #endif
1951 };
1952