xref: /illumos-gate/usr/src/uts/i86pc/os/cmi_hw.c (revision dd23d762c65e503874085a3893fbd3df9688da30)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2019, Joyent, Inc.
25  * Copyright 2023 Oxide Computer Company
26  */
27 /*
28  * Copyright (c) 2010, Intel Corporation.
29  * All rights reserved.
30  */
31 
32 /*
33  * CPU Module Interface - hardware abstraction.
34  */
35 
36 #ifdef __xpv
37 #include <sys/xpv_user.h>
38 #endif
39 
40 #include <sys/types.h>
41 #include <sys/cpu_module.h>
42 #include <sys/kmem.h>
43 #include <sys/x86_archext.h>
44 #include <sys/cpuvar.h>
45 #include <sys/ksynch.h>
46 #include <sys/x_call.h>
47 #include <sys/pghw.h>
48 #include <sys/pci_cfgacc.h>
49 #include <sys/pci_cfgspace.h>
50 #include <sys/archsystm.h>
51 #include <sys/ontrap.h>
52 #include <sys/controlregs.h>
53 #include <sys/sunddi.h>
54 #include <sys/trap.h>
55 #include <sys/mca_x86.h>
56 #include <sys/processor.h>
57 #include <sys/cmn_err.h>
58 #include <sys/nvpair.h>
59 #include <sys/fm/util.h>
60 #include <sys/fm/protocol.h>
61 #include <sys/fm/smb/fmsmb.h>
62 #include <sys/cpu_module_impl.h>
63 
64 /*
65  * Variable which determines if the SMBIOS supports x86 generic topology, or
66  * if legacy topology enumeration will occur.
67  */
68 extern int x86gentopo_legacy;
69 
70 /*
71  * Outside of this file consumers use the opaque cmi_hdl_t.  This
72  * definition is duplicated in the generic_cpu mdb module, so keep
73  * them in sync when making changes.
74  */
75 typedef struct cmi_hdl_impl {
76 	enum cmi_hdl_class cmih_class;		/* Handle nature */
77 	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
78 	uint_t cmih_chipid;			/* Chipid of cpu resource */
79 	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
80 	uint_t cmih_coreid;			/* Core within die */
81 	uint_t cmih_strandid;			/* Thread within core */
82 	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
83 	boolean_t cmih_mstrand;			/* cores are multithreaded */
84 	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
85 	uint64_t cmih_msrsrc;			/* MSR data source flags */
86 	void *cmih_hdlpriv;			/* cmi_hw.c private data */
87 	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
88 	void *cmih_cmi;				/* cpu mod control structure */
89 	void *cmih_cmidata;			/* cpu mod private data */
90 	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
91 	void *cmih_mcdata;			/* Memory-controller data */
92 	uint64_t cmih_flags;			/* See CMIH_F_* below */
93 	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
94 	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
95 	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
96 } cmi_hdl_impl_t;
97 
98 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
99 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
100 
101 #define	CMIH_F_INJACTV		0x1ULL
102 #define	CMIH_F_DEAD		0x2ULL
103 
104 /*
105  * Ops structure for handle operations.
106  */
107 struct cmi_hdl_ops {
108 	/*
109 	 * These ops are required in an implementation.
110 	 */
111 	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
112 	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
113 	uint_t (*cmio_family)(cmi_hdl_impl_t *);
114 	uint_t (*cmio_model)(cmi_hdl_impl_t *);
115 	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
116 	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
117 	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
118 	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
119 	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
120 	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
121 	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
122 	x86_chiprev_t (*cmio_chiprev)(cmi_hdl_impl_t *);
123 	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
124 	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
125 	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
126 	uint_t (*cmio_chipsig)(cmi_hdl_impl_t *);
127 	cmi_errno_t (*cmio_ncache)(cmi_hdl_impl_t *, uint32_t *);
128 	cmi_errno_t (*cmio_cache)(cmi_hdl_impl_t *, uint32_t, x86_cache_t *);
129 
130 	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
131 	/*
132 	 * These ops are optional in an implementation.
133 	 */
134 	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
135 	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
136 	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
137 	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
138 	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
139 	void (*cmio_int)(cmi_hdl_impl_t *, int);
140 	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
141 	uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
142 	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
143 	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
144 };
145 
146 static const struct cmi_hdl_ops cmi_hdl_ops;
147 
148 /*
149  * Handles are looked up from contexts such as polling, injection, etc.,
150  * where the context is reasonably well defined (although a poller could
151  * interrupt any old thread holding any old lock).  They are also looked
152  * up by machine check handlers, which may strike at inconvenient times
153  * such as during handle initialization or destruction or during handle
154  * lookup (which the #MC handler itself will also have to perform).
155  *
156  * So keeping handles in a linked list makes locking difficult when we
157  * consider #MC handlers.  Our solution is to have a look-up table indexed
158  * by that which uniquely identifies a handle - chip/core/strand id -
159  * with each entry a structure including a pointer to a handle
160  * structure for the resource, and a reference count for the handle.
161  * Reference counts are modified atomically.  The public cmi_hdl_hold
162  * always succeeds because this can only be used after handle creation
163  * and before the call to destruct, so the hold count is already at least one.
164  * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
165  * we must be certain that the count has not already been decremented to zero
166  * before applying our hold.
167  *
168  * The table is an array of maximum number of chips defined in
169  * CMI_CHIPID_ARR_SZ indexed by the chip id. If the chip is not present, the
170  * entry is NULL. Each entry is a pointer to another array which contains a
171  * list of all strands of the chip. This first level table is allocated when
172  * first we want to populate an entry. The size of the latter (per chip) table
173  * is CMI_MAX_STRANDS_PER_CHIP and it is populated when one of its cpus starts.
174  *
175  * Ideally we should only allocate to the actual number of chips, cores per
176  * chip and strand per core. The number of chips is not available until all
177  * of them are passed. The number of cores and strands are partially available.
178  * For now we stick with the above approach.
179  */
180 #define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
181 #define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
182 #define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */
183 
184 #define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
185 #define	CMI_MAX_CORES_PER_CHIP(cbits)	(1 << (cbits))
186 #define	CMI_MAX_COREID(cbits)		((1 << (cbits)) - 1)
187 #define	CMI_MAX_STRANDS_PER_CORE(sbits)	(1 << (sbits))
188 #define	CMI_MAX_STRANDID(sbits)		((1 << (sbits)) - 1)
189 #define	CMI_MAX_STRANDS_PER_CHIP(cbits, sbits)	\
190 	(CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))
191 
192 #define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
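/*
 * Editorial worked example (not part of the original source), assuming the
 * default cbits = 4 and sbits = 3: each chip slot holds 16 * 8 = 128 strand
 * entries, and the strand <chipid 2, coreid 5, strandid 1> lands at
 *
 *	cmi_chip_tab[2] + (((5 & 0xf) << 3) | (1 & 0x7)) == cmi_chip_tab[2] + 41
 *
 * which is exactly the index computation cmi_hdl_ent_lookup() performs
 * later in this file.
 */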
193 
194 typedef struct cmi_hdl_ent {
195 	volatile uint32_t cmae_refcnt;
196 	cmi_hdl_impl_t *cmae_hdlp;
197 } cmi_hdl_ent_t;
198 
199 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
200 
201 /*
202  * Default values for the number of core and strand bits.
203  */
204 uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
205 uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
206 static int cmi_ext_topo_check = 0;
207 
208 /*
209  * Controls where we will source PCI config space data.
210  */
211 #define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
212 #define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
213 #define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
214 #define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008
215 
216 static uint64_t cmi_pcicfg_flags =
217     CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
218     CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
219 
220 /*
221  * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc.
222  */
223 #define	CMI_MSR_FLAG_RD_HWOK		0x0001
224 #define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
225 #define	CMI_MSR_FLAG_WR_HWOK		0x0004
226 #define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
227 
228 int cmi_call_func_ntv_tries = 3;
229 
230 static cmi_errno_t
231 call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
232 {
233 	cmi_errno_t rc = -1;
234 	int i;
235 
236 	kpreempt_disable();
237 
238 	if (CPU->cpu_id == cpuid) {
239 		(*func)(arg1, arg2, (xc_arg_t)&rc);
240 	} else {
241 		/*
242 		 * This should not happen for a #MC trap or a poll, so
243 		 * this is likely an error injection or similar.
244 		 * We will try to cross call with xc_priority - we
245 		 * can't guarantee success with xc_call because
246 		 * the interrupt code in the case of a #MC may
247 		 * already hold the xc mutex.
248 		 */
249 		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
250 			cpuset_t cpus;
251 
252 			CPUSET_ONLY(cpus, cpuid);
253 			xc_priority(arg1, arg2, (xc_arg_t)&rc,
254 			    CPUSET2BV(cpus), func);
255 			if (rc != -1)
256 				break;
257 
258 			DELAY(1);
259 		}
260 	}
261 
262 	kpreempt_enable();
263 
264 	return (rc != -1 ? rc : CMIERR_DEADLOCK);
265 }
266 
267 static uint64_t injcnt;
268 
269 void
270 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
271 {
272 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
273 
274 	if (hdl != NULL)
275 		hdl->cmih_flags |= CMIH_F_INJACTV;
276 	if (injcnt++ == 0) {
277 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
278 		    "activity noted");
279 	}
280 }
281 
282 void
283 cmi_hdl_inj_end(cmi_hdl_t ophdl)
284 {
285 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
286 
287 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
288 	if (hdl != NULL)
289 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
290 }
291 
292 boolean_t
293 cmi_inj_tainted(void)
294 {
295 	return (injcnt != 0 ? B_TRUE : B_FALSE);
296 }
297 
298 /*
299  *	 =======================================================
300  *	|	MSR Interposition				|
301  *	|	-----------------				|
302  *	|							|
303  *	 -------------------------------------------------------
304  */
305 
306 #define	CMI_MSRI_HASHSZ		16
307 #define	CMI_MSRI_HASHIDX(hdl, msr) \
308 	((((uintptr_t)(hdl) >> 3) + (msr)) % (CMI_MSRI_HASHSZ - 1))
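/*
 * Editorial note (not part of the original source): the modulus above is
 * CMI_MSRI_HASHSZ - 1 (i.e. 15), not CMI_MSRI_HASHSZ, so indices range over
 * 0..14 and bucket 15 of msrihash[] is never used.  CMI_PCII_HASHIDX below
 * shares the same pattern.
 */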
309 
310 struct cmi_msri_bkt {
311 	kmutex_t msrib_lock;
312 	struct cmi_msri_hashent *msrib_head;
313 };
314 
315 struct cmi_msri_hashent {
316 	struct cmi_msri_hashent *msrie_next;
317 	struct cmi_msri_hashent *msrie_prev;
318 	cmi_hdl_impl_t *msrie_hdl;
319 	uint_t msrie_msrnum;
320 	uint64_t msrie_msrval;
321 };
322 
323 #define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
324 	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
325 
326 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
327 
328 static void
329 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
330 {
331 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
332 	struct cmi_msri_bkt *hbp = &msrihash[idx];
333 	struct cmi_msri_hashent *hep;
334 
335 	mutex_enter(&hbp->msrib_lock);
336 
337 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
338 		if (CMI_MSRI_MATCH(hep, hdl, msr))
339 			break;
340 	}
341 
342 	if (hep != NULL) {
343 		hep->msrie_msrval = val;
344 	} else {
345 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
346 		hep->msrie_hdl = hdl;
347 		hep->msrie_msrnum = msr;
348 		hep->msrie_msrval = val;
349 
350 		if (hbp->msrib_head != NULL)
351 			hbp->msrib_head->msrie_prev = hep;
352 		hep->msrie_next = hbp->msrib_head;
353 		hep->msrie_prev = NULL;
354 		hbp->msrib_head = hep;
355 	}
356 
357 	mutex_exit(&hbp->msrib_lock);
358 }
359 
360 /*
361  * Look for a match for the given handle and msr.  Return 1 with valp
362  * filled if a match is found, otherwise return 0 with valp untouched.
363  */
364 static int
365 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
366 {
367 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
368 	struct cmi_msri_bkt *hbp = &msrihash[idx];
369 	struct cmi_msri_hashent *hep;
370 
371 	/*
372 	 * This function is called during #MC trap handling, so we should
373 	 * consider the possibility that the hash mutex is held by the
374 	 * interrupted thread.  This should not happen because interposition
375 	 * is an artificial injection mechanism and the #MC is requested
376 	 * after adding entries, but just in case of a real #MC at an
377 	 * unlucky moment we'll use mutex_tryenter here.
378 	 */
379 	if (!mutex_tryenter(&hbp->msrib_lock))
380 		return (0);
381 
382 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
383 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
384 			*valp = hep->msrie_msrval;
385 			break;
386 		}
387 	}
388 
389 	mutex_exit(&hbp->msrib_lock);
390 
391 	return (hep != NULL);
392 }
393 
394 /*
395  * Remove any interposed value that matches.
396  */
397 static void
398 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
399 {
400 
401 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
402 	struct cmi_msri_bkt *hbp = &msrihash[idx];
403 	struct cmi_msri_hashent *hep;
404 
405 	if (!mutex_tryenter(&hbp->msrib_lock))
406 		return;
407 
408 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
409 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
410 			if (hep->msrie_prev != NULL)
411 				hep->msrie_prev->msrie_next = hep->msrie_next;
412 
413 			if (hep->msrie_next != NULL)
414 				hep->msrie_next->msrie_prev = hep->msrie_prev;
415 
416 			if (hbp->msrib_head == hep)
417 				hbp->msrib_head = hep->msrie_next;
418 
419 			kmem_free(hep, sizeof (*hep));
420 			break;
421 		}
422 	}
423 
424 	mutex_exit(&hbp->msrib_lock);
425 }
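/*
 * Editorial sketch (not part of the original source): the static routines
 * above form a simple interpose/read/invalidate lifecycle.  Values are
 * stashed with msri_addent() (via ntv_msrinterpose() and
 * cmi_hdl_msrforward()), consulted by cmi_hdl_rdmsr() ahead of hardware,
 * and dropped again by cmi_hdl_wrmsr(), roughly:
 *
 *	msri_addent(hdl, IA32_MSR_MCG_STATUS, fakeval);
 *	(void) msri_lookup(hdl, IA32_MSR_MCG_STATUS, &v);  v == fakeval, hit
 *	msri_rment(hdl, IA32_MSR_MCG_STATUS);		   entry freed
 */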
426 
427 /*
428  *	 =======================================================
429  *	|	PCI Config Space Interposition			|
430  *	|	------------------------------			|
431  *	|							|
432  *	 -------------------------------------------------------
433  */
434 
435 /*
436  * Hash for interposed PCI config space values.  We look up by
437  * bus/dev/func/offset and then record whether the value stashed was made
438  * with a byte, word or doubleword access;  we will only return a hit for an
439  * access of the same size.  If you access, say, a 32-bit register using
440  * byte accesses and then attempt to read the full 32-bit value back, you
441  * will not obtain any sort of merged result - you get a lookup miss.
442  */
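/*
 * Editorial illustration (not part of the original source; the b/d/f and
 * offset values are arbitrary): interposing a doubleword and then reading
 * it back with a narrower access misses, because pcii_asize participates
 * in the match:
 *
 *	cmi_pci_interposel(0, 24, 3, 0x10, 0xdeadbeef);
 *	v32 = cmi_pci_getl(0, 24, 3, 0x10, &ip, NULL);	ip == 1, hit
 *	v8 = cmi_pci_getb(0, 24, 3, 0x10, &ip, NULL);	ip == 0, miss
 */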
443 
444 #define	CMI_PCII_HASHSZ		16
445 #define	CMI_PCII_HASHIDX(b, d, f, o) \
446 	(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
447 
448 struct cmi_pcii_bkt {
449 	kmutex_t pciib_lock;
450 	struct cmi_pcii_hashent *pciib_head;
451 };
452 
453 struct cmi_pcii_hashent {
454 	struct cmi_pcii_hashent *pcii_next;
455 	struct cmi_pcii_hashent *pcii_prev;
456 	int pcii_bus;
457 	int pcii_dev;
458 	int pcii_func;
459 	int pcii_reg;
460 	int pcii_asize;
461 	uint32_t pcii_val;
462 };
463 
464 #define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
465 	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
466 	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
467 	(ent)->pcii_asize == (asz))
468 
469 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
470 
471 
472 /*
473  * Add a new entry to the PCI interpose hash, overwriting any existing
474  * entry that is found.
475  */
476 static void
477 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
478 {
479 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
480 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
481 	struct cmi_pcii_hashent *hep;
482 
483 	cmi_hdl_inj_begin(NULL);
484 
485 	mutex_enter(&hbp->pciib_lock);
486 
487 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
488 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
489 			break;
490 	}
491 
492 	if (hep != NULL) {
493 		hep->pcii_val = val;
494 	} else {
495 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
496 		hep->pcii_bus = bus;
497 		hep->pcii_dev = dev;
498 		hep->pcii_func = func;
499 		hep->pcii_reg = reg;
500 		hep->pcii_asize = asz;
501 		hep->pcii_val = val;
502 
503 		if (hbp->pciib_head != NULL)
504 			hbp->pciib_head->pcii_prev = hep;
505 		hep->pcii_next = hbp->pciib_head;
506 		hep->pcii_prev = NULL;
507 		hbp->pciib_head = hep;
508 	}
509 
510 	mutex_exit(&hbp->pciib_lock);
511 
512 	cmi_hdl_inj_end(NULL);
513 }
514 
515 /*
516  * Look for a match for the given bus/dev/func/reg; return 1 with valp
517  * filled if a match is found, otherwise return 0 with valp untouched.
518  */
519 static int
520 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
521 {
522 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
523 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
524 	struct cmi_pcii_hashent *hep;
525 
526 	if (!mutex_tryenter(&hbp->pciib_lock))
527 		return (0);
528 
529 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
530 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
531 			*valp = hep->pcii_val;
532 			break;
533 		}
534 	}
535 
536 	mutex_exit(&hbp->pciib_lock);
537 
538 	return (hep != NULL);
539 }
540 
541 static void
542 pcii_rment(int bus, int dev, int func, int reg, int asz)
543 {
544 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
545 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
546 	struct cmi_pcii_hashent *hep;
547 
548 	mutex_enter(&hbp->pciib_lock);
549 
550 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
551 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
552 			if (hep->pcii_prev != NULL)
553 				hep->pcii_prev->pcii_next = hep->pcii_next;
554 
555 			if (hep->pcii_next != NULL)
556 				hep->pcii_next->pcii_prev = hep->pcii_prev;
557 
558 			if (hbp->pciib_head == hep)
559 				hbp->pciib_head = hep->pcii_next;
560 
561 			kmem_free(hep, sizeof (*hep));
562 			break;
563 		}
564 	}
565 
566 	mutex_exit(&hbp->pciib_lock);
567 }
568 
569 #ifndef __xpv
570 
571 /*
572  *	 =======================================================
573  *	|	Native methods					|
574  *	|	--------------					|
575  *	|							|
576  *	| These are used when we are running native on bare-	|
577  *	| metal, or simply don't know any better.		|
578  *	---------------------------------------------------------
579  */
580 
581 #define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
582 
583 static uint_t
584 ntv_vendor(cmi_hdl_impl_t *hdl)
585 {
586 	return (cpuid_getvendor(HDLPRIV(hdl)));
587 }
588 
589 static const char *
590 ntv_vendorstr(cmi_hdl_impl_t *hdl)
591 {
592 	return (cpuid_getvendorstr(HDLPRIV(hdl)));
593 }
594 
595 static uint_t
596 ntv_family(cmi_hdl_impl_t *hdl)
597 {
598 	return (cpuid_getfamily(HDLPRIV(hdl)));
599 }
600 
601 static uint_t
602 ntv_model(cmi_hdl_impl_t *hdl)
603 {
604 	return (cpuid_getmodel(HDLPRIV(hdl)));
605 }
606 
607 static uint_t
608 ntv_stepping(cmi_hdl_impl_t *hdl)
609 {
610 	return (cpuid_getstep(HDLPRIV(hdl)));
611 }
612 
613 static uint_t
614 ntv_chipid(cmi_hdl_impl_t *hdl)
615 {
616 	return (hdl->cmih_chipid);
617 
618 }
619 
620 static uint_t
621 ntv_procnodeid(cmi_hdl_impl_t *hdl)
622 {
623 	return (hdl->cmih_procnodeid);
624 }
625 
626 static uint_t
627 ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
628 {
629 	return (hdl->cmih_procnodes_per_pkg);
630 }
631 
632 static uint_t
633 ntv_coreid(cmi_hdl_impl_t *hdl)
634 {
635 	return (hdl->cmih_coreid);
636 }
637 
638 static uint_t
639 ntv_strandid(cmi_hdl_impl_t *hdl)
640 {
641 	return (hdl->cmih_strandid);
642 }
643 
644 static uint_t
645 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
646 {
647 	return (cpuid_get_apicid(HDLPRIV(hdl)));
648 }
649 
650 static uint16_t
651 ntv_smbiosid(cmi_hdl_impl_t *hdl)
652 {
653 	return (hdl->cmih_smbiosid);
654 }
655 
656 static uint_t
657 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
658 {
659 	return (hdl->cmih_smb_chipid);
660 }
661 
662 static nvlist_t *
663 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
664 {
665 	return (hdl->cmih_smb_bboard);
666 }
667 
668 static x86_chiprev_t
669 ntv_chiprev(cmi_hdl_impl_t *hdl)
670 {
671 	return (cpuid_getchiprev(HDLPRIV(hdl)));
672 }
673 
674 static const char *
675 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
676 {
677 	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
678 }
679 
680 static uint32_t
681 ntv_getsockettype(cmi_hdl_impl_t *hdl)
682 {
683 	return (cpuid_getsockettype(HDLPRIV(hdl)));
684 }
685 
686 static const char *
687 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
688 {
689 	return (cpuid_getsocketstr(HDLPRIV(hdl)));
690 }
691 
692 static uint_t
693 ntv_chipsig(cmi_hdl_impl_t *hdl)
694 {
695 	return (cpuid_getsig(HDLPRIV(hdl)));
696 }
697 
698 static cmi_errno_t
699 cmi_cpuid_cache_to_cmi(int err)
700 {
701 	switch (err) {
702 	case 0:
703 		return (CMI_SUCCESS);
704 	case ENOTSUP:
705 		return (CMIERR_C_NODATA);
706 	case EINVAL:
707 		return (CMIERR_C_BADCACHENO);
708 	default:
709 		return (CMIERR_UNKNOWN);
710 	}
711 }
712 
713 static cmi_errno_t
714 ntv_ncache(cmi_hdl_impl_t *hdl, uint32_t *ncache)
715 {
716 	int ret = cpuid_getncaches(HDLPRIV(hdl), ncache);
717 	return (cmi_cpuid_cache_to_cmi(ret));
718 }
719 
720 static cmi_errno_t
721 ntv_cache(cmi_hdl_impl_t *hdl, uint32_t cno, x86_cache_t *cachep)
722 {
723 	int ret = cpuid_getcache(HDLPRIV(hdl), cno, cachep);
724 	return (cmi_cpuid_cache_to_cmi(ret));
725 }
726 
727 static id_t
728 ntv_logical_id(cmi_hdl_impl_t *hdl)
729 {
730 	return (HDLPRIV(hdl)->cpu_id);
731 }
732 
733 /*ARGSUSED*/
734 static int
735 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
736 {
737 	ulong_t *dest = (ulong_t *)arg1;
738 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
739 
740 	*dest = getcr4();
741 	*rcp = CMI_SUCCESS;
742 
743 	return (0);
744 }
745 
746 static ulong_t
747 ntv_getcr4(cmi_hdl_impl_t *hdl)
748 {
749 	cpu_t *cp = HDLPRIV(hdl);
750 	ulong_t val;
751 
752 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, 0);
753 
754 	return (val);
755 }
756 
757 /*ARGSUSED*/
758 static int
759 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
760 {
761 	ulong_t val = (ulong_t)arg1;
762 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
763 
764 	setcr4(val);
765 	*rcp = CMI_SUCCESS;
766 
767 	return (0);
768 }
769 
770 static void
771 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
772 {
773 	cpu_t *cp = HDLPRIV(hdl);
774 
775 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, 0);
776 }
777 
778 volatile uint32_t cmi_trapped_rdmsr;
779 
780 /*ARGSUSED*/
781 static int
782 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
783 {
784 	uint_t msr = (uint_t)arg1;
785 	uint64_t *valp = (uint64_t *)arg2;
786 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
787 
788 	on_trap_data_t otd;
789 
790 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
791 		if (checked_rdmsr(msr, valp) == 0)
792 			*rcp = CMI_SUCCESS;
793 		else
794 			*rcp = CMIERR_NOTSUP;
795 	} else {
796 		*rcp = CMIERR_MSRGPF;
797 		atomic_inc_32(&cmi_trapped_rdmsr);
798 	}
799 	no_trap();
800 
801 	return (0);
802 }
803 
804 static cmi_errno_t
805 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
806 {
807 	cpu_t *cp = HDLPRIV(hdl);
808 
809 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
810 		return (CMIERR_INTERPOSE);
811 
812 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
813 	    (xc_arg_t)msr, (xc_arg_t)valp));
814 }
815 
816 volatile uint32_t cmi_trapped_wrmsr;
817 
818 /*ARGSUSED*/
819 static int
820 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
821 {
822 	uint_t msr = (uint_t)arg1;
823 	uint64_t val = *((uint64_t *)arg2);
824 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
825 	on_trap_data_t otd;
826 
827 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
828 		if (checked_wrmsr(msr, val) == 0)
829 			*rcp = CMI_SUCCESS;
830 		else
831 			*rcp = CMIERR_NOTSUP;
832 	} else {
833 		*rcp = CMIERR_MSRGPF;
834 		atomic_inc_32(&cmi_trapped_wrmsr);
835 	}
836 	no_trap();
837 
838 	return (0);
839 
840 }
841 
842 static cmi_errno_t
843 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
844 {
845 	cpu_t *cp = HDLPRIV(hdl);
846 
847 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
848 		return (CMI_SUCCESS);
849 
850 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
851 	    (xc_arg_t)msr, (xc_arg_t)&val));
852 }
853 
854 static cmi_errno_t
855 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
856 {
857 	msri_addent(hdl, msr, val);
858 	return (CMI_SUCCESS);
859 }
860 
861 /*ARGSUSED*/
862 static int
863 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
864 {
865 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
866 	int int_no = (int)arg1;
867 
868 	if (int_no == T_MCE)
869 		int18();
870 	else
871 		int_cmci();
872 	*rcp = CMI_SUCCESS;
873 
874 	return (0);
875 }
876 
877 static void
878 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
879 {
880 	cpu_t *cp = HDLPRIV(hdl);
881 
882 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, 0);
883 }
884 
885 static int
886 ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
887 {
888 	int rc;
889 	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;
890 
891 	while (mutex_tryenter(&cpu_lock) == 0) {
892 		if (hdl->cmih_flags & CMIH_F_DEAD)
893 			return (EBUSY);
894 		delay(1);
895 	}
896 	rc = p_online_internal_locked(cpuid, new_status, old_status);
897 	mutex_exit(&cpu_lock);
898 
899 	return (rc);
900 }
901 
902 #else	/* __xpv */
903 
904 /*
905  *	 =======================================================
906  *	|	xVM dom0 methods				|
907  *	|	----------------				|
908  *	|							|
909  *	| These are used when we are running as dom0 in		|
910  *	| a Solaris xVM context.				|
911  *	---------------------------------------------------------
912  */
913 
914 #define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
915 
916 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
917 
918 
919 static uint_t
920 xpv_vendor(cmi_hdl_impl_t *hdl)
921 {
922 	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
923 	    HDLPRIV(hdl))));
924 }
925 
926 static const char *
927 xpv_vendorstr(cmi_hdl_impl_t *hdl)
928 {
929 	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
930 }
931 
932 static uint_t
933 xpv_family(cmi_hdl_impl_t *hdl)
934 {
935 	return (xen_physcpu_family(HDLPRIV(hdl)));
936 }
937 
938 static uint_t
939 xpv_model(cmi_hdl_impl_t *hdl)
940 {
941 	return (xen_physcpu_model(HDLPRIV(hdl)));
942 }
943 
944 static uint_t
945 xpv_stepping(cmi_hdl_impl_t *hdl)
946 {
947 	return (xen_physcpu_stepping(HDLPRIV(hdl)));
948 }
949 
950 static uint_t
951 xpv_chipid(cmi_hdl_impl_t *hdl)
952 {
953 	return (hdl->cmih_chipid);
954 }
955 
956 static uint_t
957 xpv_procnodeid(cmi_hdl_impl_t *hdl)
958 {
959 	return (hdl->cmih_procnodeid);
960 }
961 
962 static uint_t
963 xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
964 {
965 	return (hdl->cmih_procnodes_per_pkg);
966 }
967 
968 static uint_t
969 xpv_coreid(cmi_hdl_impl_t *hdl)
970 {
971 	return (hdl->cmih_coreid);
972 }
973 
974 static uint_t
975 xpv_strandid(cmi_hdl_impl_t *hdl)
976 {
977 	return (hdl->cmih_strandid);
978 }
979 
980 static uint_t
981 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
982 {
983 	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
984 }
985 
986 static uint16_t
987 xpv_smbiosid(cmi_hdl_impl_t *hdl)
988 {
989 	return (hdl->cmih_smbiosid);
990 }
991 
992 static uint_t
993 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
994 {
995 	return (hdl->cmih_smb_chipid);
996 }
997 
998 static nvlist_t *
999 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
1000 {
1001 	return (hdl->cmih_smb_bboard);
1002 }
1003 
1004 extern x86_chiprev_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
1005 
1006 static x86_chiprev_t
1007 xpv_chiprev(cmi_hdl_impl_t *hdl)
1008 {
1009 	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
1010 	    xpv_model(hdl), xpv_stepping(hdl)));
1011 }
1012 
1013 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
1014 
1015 static const char *
1016 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
1017 {
1018 	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
1019 	    xpv_model(hdl), xpv_stepping(hdl)));
1020 }
1021 
1022 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
1023 
1024 static uint32_t
1025 xpv_getsockettype(cmi_hdl_impl_t *hdl)
1026 {
1027 	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
1028 	    xpv_model(hdl), xpv_stepping(hdl)));
1029 }
1030 
1031 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
1032 
1033 static const char *
1034 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
1035 {
1036 	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
1037 	    xpv_model(hdl), xpv_stepping(hdl)));
1038 }
1039 
1040 /* ARGSUSED */
1041 static uint_t
1042 xpv_chipsig(cmi_hdl_impl_t *hdl)
1043 {
1044 	return (0);
1045 }
1046 
1047 static cmi_errno_t
1048 xpv_ncache(cmi_hdl_impl_t *hdl, uint32_t *ncache)
1049 {
1050 	return (CMIERR_NOTSUP);
1051 }
1052 
1053 static cmi_errno_t
1054 xpv_cache(cmi_hdl_impl_t *hdl, uint32_t cno, x86_cache_t *cachep)
1055 {
1056 	return (CMIERR_NOTSUP);
1057 }
1058 
1059 static id_t
1060 xpv_logical_id(cmi_hdl_impl_t *hdl)
1061 {
1062 	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
1063 }
1064 
1065 static cmi_errno_t
1066 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1067 {
1068 	switch (msr) {
1069 	case IA32_MSR_MCG_CAP:
1070 		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1071 		break;
1072 
1073 	default:
1074 		return (CMIERR_NOTSUP);
1075 	}
1076 
1077 	return (CMI_SUCCESS);
1078 }
1079 
1080 /*
1081  * Request the hypervisor to write an MSR for us.  The hypervisor
1082  * will only accept MCA-related MSRs, as this is for MCA error
1083  * simulation purposes alone.  We will pre-screen MSRs for injection
1084  * so we don't bother the HV with bogus requests.  We will permit
1085  * injection to any MCA bank register, and to MCG_STATUS.
1086  */
1087 
1088 #define	IS_MCA_INJ_MSR(msr) \
1089 	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1090 	(msr) == IA32_MSR_MCG_STATUS)
1091 
1092 static cmi_errno_t
1093 xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
1094 {
1095 	xen_mc_t xmc;
1096 	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;
1097 
1098 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1099 		return (CMIERR_NOTSUP);		/* for injection use only! */
1100 
1101 	if (!IS_MCA_INJ_MSR(msr))
1102 		return (CMIERR_API);
1103 
1104 	if (panicstr)
1105 		return (CMIERR_DEADLOCK);
1106 
1107 	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1108 	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
1109 	mci->mcinj_count = 1;	/* learn to batch sometime */
1110 	mci->mcinj_msr[0].reg = msr;
1111 	mci->mcinj_msr[0].value = val;
1112 
1113 	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
1114 	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
1115 }
1116 
1117 static cmi_errno_t
1118 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1119 {
1120 	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1121 }
1122 
1123 
1124 static cmi_errno_t
1125 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1126 {
1127 	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1128 }
1129 
1130 static void
1131 xpv_int(cmi_hdl_impl_t *hdl, int int_no)
1132 {
1133 	xen_mc_t xmc;
1134 	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;
1135 
1136 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1137 		return;
1138 
1139 	if (int_no != T_MCE) {
1140 		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
1141 		    int_no);
1142 	}
1143 
1144 	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1145 
1146 	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
1147 }
1148 
1149 static int
1150 xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
1151 {
1152 	xen_sysctl_t xs;
1153 	int op, rc, status;
1154 
1155 	new_status &= ~P_FORCED;
1156 
1157 	switch (new_status) {
1158 	case P_STATUS:
1159 		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
1160 		break;
1161 	case P_FAULTED:
1162 	case P_OFFLINE:
1163 		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
1164 		break;
1165 	case P_ONLINE:
1166 		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
1167 		break;
1168 	default:
1169 		return (-1);
1170 	}
1171 
1172 	xs.cmd = XEN_SYSCTL_cpu_hotplug;
1173 	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1174 	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
1175 	xs.u.cpu_hotplug.op = op;
1176 
1177 	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
1178 		status = rc;
1179 		rc = 0;
1180 		switch (status) {
1181 		case XEN_CPU_HOTPLUG_STATUS_NEW:
1182 			*old_status = P_OFFLINE;
1183 			break;
1184 		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
1185 			*old_status = P_FAULTED;
1186 			break;
1187 		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
1188 			*old_status = P_ONLINE;
1189 			break;
1190 		default:
1191 			return (-1);
1192 		}
1193 	}
1194 
1195 	return (-rc);
1196 }
1197 
1198 #endif
1199 
1200 /*ARGSUSED*/
1201 static void *
1202 cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1203     uint_t strandid)
1204 {
1205 #ifdef __xpv
1206 	xen_mc_lcpu_cookie_t cpi;
1207 
1208 	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
1209 	    cpi = xen_physcpu_next(cpi)) {
1210 		if (xen_physcpu_chipid(cpi) == chipid &&
1211 		    xen_physcpu_coreid(cpi) == coreid &&
1212 		    xen_physcpu_strandid(cpi) == strandid)
1213 			return ((void *)cpi);
1214 	}
1215 	return (NULL);
1216 
1217 #else	/* __xpv */
1218 
1219 	cpu_t *cp, *startcp;
1220 
1221 	kpreempt_disable();
1222 	cp = startcp = CPU;
1223 	do {
1224 		if (cmi_ntv_hwchipid(cp) == chipid &&
1225 		    cmi_ntv_hwcoreid(cp) == coreid &&
1226 		    cmi_ntv_hwstrandid(cp) == strandid) {
1227 			kpreempt_enable();
1228 			return ((void *)cp);
1229 		}
1230 
1231 		cp = cp->cpu_next;
1232 	} while (cp != startcp);
1233 	kpreempt_enable();
1234 	return (NULL);
1235 #endif	/* __xpv */
1236 }
1237 
1238 static boolean_t
1239 cpu_is_cmt(void *priv)
1240 {
1241 #ifdef __xpv
1242 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1243 #else /* __xpv */
1244 	cpu_t *cp = (cpu_t *)priv;
1245 
1246 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1247 	    cpuid_get_ncore_per_chip(cp);
1248 
1249 	return (strands_per_core > 1);
1250 #endif /* __xpv */
1251 }
1252 
1253 /*
1254  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1255  * tuple.
1256  */
1257 static cmi_hdl_ent_t *
1258 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1259 {
1260 	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1261 	    cmi_strand_nbits);
1262 
1263 	/*
1264 	 * Allocate per-chip table which contains a list of handle of
1265 	 * all strands of the chip.
1266 	 */
1267 	if (cmi_chip_tab[chipid] == NULL) {
1268 		size_t sz;
1269 		cmi_hdl_ent_t *pg;
1270 
1271 		sz = max_strands * sizeof (cmi_hdl_ent_t);
1272 		pg = kmem_zalloc(sz, KM_SLEEP);
1273 
1274 		/* test and set the per-chip table if it is not allocated */
1275 		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1276 			kmem_free(pg, sz); /* someone beat us to it */
1277 	}
1278 
1279 	return (cmi_chip_tab[chipid] +
1280 	    ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
1281 	    ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
1282 }
1283 
1284 extern void cpuid_get_ext_topo(cpu_t *, uint_t *, uint_t *);
1285 
1286 cmi_hdl_t
1287 cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1288     uint_t strandid)
1289 {
1290 	cmi_hdl_impl_t *hdl;
1291 	void *priv;
1292 	cmi_hdl_ent_t *ent;
1293 	uint_t vendor;
1294 
1295 #ifdef __xpv
1296 	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
1297 #else
1298 	ASSERT(class == CMI_HDL_NATIVE);
1299 #endif
1300 
1301 	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
1302 		return (NULL);
1303 
1304 	/*
1305 	 * Assume all chips in the system are the same type.
1306 	 * For Intel, attempt to check whether extended topology is available
1307 	 * via CPUID.EAX=0xB. If so, get the number of core and strand bits.
1308 	 */
1309 #ifdef __xpv
1310 	vendor = _cpuid_vendorstr_to_vendorcode(
1311 	    (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
1312 #else
1313 	vendor = cpuid_getvendor((cpu_t *)priv);
1314 #endif
1315 
1316 	switch (vendor) {
1317 	case X86_VENDOR_Intel:
1318 	case X86_VENDOR_AMD:
1319 	case X86_VENDOR_HYGON:
1320 		if (cmi_ext_topo_check == 0) {
1321 			cpuid_get_ext_topo((cpu_t *)priv, &cmi_core_nbits,
1322 			    &cmi_strand_nbits);
1323 			cmi_ext_topo_check = 1;
1324 		}
1325 	default:
1326 		break;
1327 	}
1328 
1329 	if (chipid > CMI_MAX_CHIPID ||
1330 	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1331 	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1332 		return (NULL);
1333 
1334 	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);
1335 
1336 	hdl->cmih_class = class;
1337 	HDLOPS(hdl) = &cmi_hdl_ops;
1338 	hdl->cmih_chipid = chipid;
1339 	hdl->cmih_coreid = coreid;
1340 	hdl->cmih_strandid = strandid;
1341 	hdl->cmih_mstrand = cpu_is_cmt(priv);
1342 	hdl->cmih_hdlpriv = priv;
1343 #ifdef __xpv
1344 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
1345 	    CMI_MSR_FLAG_WR_INTERPOSEOK;
1346 
1347 	/*
1348 	 * XXX: need hypervisor support for procnodeid, for now assume
1349 	 * single-node processors (procnodeid = chipid)
1350 	 */
1351 	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
1352 	hdl->cmih_procnodes_per_pkg = 1;
1353 #else   /* __xpv */
1354 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
1355 	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
1356 	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
1357 	hdl->cmih_procnodes_per_pkg =
1358 	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
1359 #endif  /* __xpv */
1360 
1361 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1362 	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
1363 		/*
1364 		 * Somehow this (chipid, coreid, strandid) id tuple has
1365 		 * already been assigned!  This indicates that the
1366 		 * caller's logic in determining these values is busted,
1367 		 * or perhaps undermined by bad BIOS setup.  Complain,
1368 		 * and refuse to initialize this tuple again as bad things
1369 		 * will happen.
1370 		 */
1371 		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
1372 		    "strandid %d handle already allocated!",
1373 		    chipid, coreid, strandid);
1374 		kmem_free(hdl, sizeof (*hdl));
1375 		return (NULL);
1376 	}
1377 
1378 	/*
1379 	 * Once we store a nonzero reference count others can find this
1380 	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
1381 	 * is to be dropped only if some other part of cmi initialization
1382 	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
1383 	 * the module private data we hold in cmih_cmi and cmih_cmidata
1384 	 * is still NULL at this point (the caller will fill it with
1385 	 * cmi_hdl_setcmi if it initializes) so consumers of handles
1386 	 * should always be ready for that possibility.
1387 	 */
1388 	ent->cmae_hdlp = hdl;
1389 	hdl->cmih_refcntp = &ent->cmae_refcnt;
1390 	ent->cmae_refcnt = 1;
1391 
1392 	return ((cmi_hdl_t)hdl);
1393 }
1394 
1395 void
1396 cmi_read_smbios(cmi_hdl_t ophdl)
1397 {
1398 
1399 	uint_t strand_apicid = UINT_MAX;
1400 	uint_t chip_inst = UINT_MAX;
1401 	uint16_t smb_id = USHRT_MAX;
1402 	int rc = 0;
1403 
1404 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1405 
1406 	/* set x86gentopo compatibility */
1407 	fm_smb_fmacompat();
1408 
1409 #ifndef __xpv
1410 	strand_apicid = ntv_strand_apicid(hdl);
1411 #else
1412 	strand_apicid = xpv_strand_apicid(hdl);
1413 #endif
1414 
1415 	if (!x86gentopo_legacy) {
1416 		/*
1417 		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
1418 		 * topo reverts to legacy mode
1419 		 */
1420 		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
1421 		if (rc == 0) {
1422 			hdl->cmih_smb_chipid = chip_inst;
1423 			hdl->cmih_smbiosid = smb_id;
1424 		} else {
1425 #ifdef DEBUG
1426 			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
1427 #endif /* DEBUG */
1428 			return;
1429 		}
1430 
1431 		hdl->cmih_smb_bboard  = fm_smb_bboard(strand_apicid);
1432 #ifdef DEBUG
1433 		if (hdl->cmih_smb_bboard == NULL)
1434 			cmn_err(CE_NOTE,
1435 			    "!cmi reads smbios base boards info failed");
1436 #endif /* DEBUG */
1437 	}
1438 }
1439 
1440 void
1441 cmi_hdl_hold(cmi_hdl_t ophdl)
1442 {
1443 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1444 
1445 	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1446 
1447 	atomic_inc_32(hdl->cmih_refcntp);
1448 }
1449 
1450 static int
1451 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1452 {
1453 	volatile uint32_t *refcntp;
1454 	uint32_t refcnt;
1455 
1456 	refcntp = &ent->cmae_refcnt;
1457 	refcnt = *refcntp;
1458 
1459 	if (refcnt == 0) {
1460 		/*
1461 		 * Associated object never existed, is being destroyed,
1462 		 * or has been destroyed.
1463 		 */
1464 		return (0);
1465 	}
1466 
1467 	/*
1468 	 * We cannot use atomic increment here because once the reference
1469 	 * count reaches zero it must never be bumped up again.
1470 	 */
1471 	while (refcnt != 0) {
1472 		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1473 			return (1);
1474 		refcnt = *refcntp;
1475 	}
1476 
1477 	/*
1478 	 * Somebody dropped the reference count to 0 after our initial
1479 	 * check.
1480 	 */
1481 	return (0);
1482 }
1483 
1484 
1485 void
1486 cmi_hdl_rele(cmi_hdl_t ophdl)
1487 {
1488 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1489 
1490 	ASSERT(*hdl->cmih_refcntp > 0);
1491 	atomic_dec_32(hdl->cmih_refcntp);
1492 }
1493 
1494 void
1495 cmi_hdl_destroy(cmi_hdl_t ophdl)
1496 {
1497 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1498 	cmi_hdl_ent_t *ent;
1499 
1500 	/* Release the reference count held by cmi_hdl_create(). */
1501 	ASSERT(*hdl->cmih_refcntp > 0);
1502 	atomic_dec_32(hdl->cmih_refcntp);
1503 	hdl->cmih_flags |= CMIH_F_DEAD;
1504 
1505 	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
1506 	    hdl->cmih_strandid);
1507 	/*
1508 	 * Use busy polling instead of condition variable here because
1509 	 * cmi_hdl_rele() may be called from a #MC handler.
1510 	 */
1511 	while (cmi_hdl_canref(ent)) {
1512 		cmi_hdl_rele(ophdl);
1513 		delay(1);
1514 	}
1515 	ent->cmae_hdlp = NULL;
1516 
1517 	kmem_free(hdl, sizeof (*hdl));
1518 }
1519 
1520 void
1521 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1522 {
1523 	IMPLHDL(ophdl)->cmih_spec = arg;
1524 }
1525 
1526 void *
1527 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1528 {
1529 	return (IMPLHDL(ophdl)->cmih_spec);
1530 }
1531 
1532 void
1533 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1534 {
1535 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1536 
1537 	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1538 	hdl->cmih_mcops = mcops;
1539 	hdl->cmih_mcdata = mcdata;
1540 }
1541 
1542 const struct cmi_mc_ops *
1543 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1544 {
1545 	return (IMPLHDL(ophdl)->cmih_mcops);
1546 }
1547 
1548 void *
1549 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1550 {
1551 	return (IMPLHDL(ophdl)->cmih_mcdata);
1552 }
1553 
1554 cmi_hdl_t
1555 cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1556     uint_t strandid)
1557 {
1558 	cmi_hdl_ent_t *ent;
1559 
1560 	if (chipid > CMI_MAX_CHIPID ||
1561 	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
1562 	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
1563 		return (NULL);
1564 
1565 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1566 
1567 	if (class == CMI_HDL_NEUTRAL)
1568 #ifdef __xpv
1569 		class = CMI_HDL_SOLARIS_xVM_MCA;
1570 #else
1571 		class = CMI_HDL_NATIVE;
1572 #endif
1573 
1574 	if (!cmi_hdl_canref(ent))
1575 		return (NULL);
1576 
1577 	if (ent->cmae_hdlp->cmih_class != class) {
1578 		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
1579 		return (NULL);
1580 	}
1581 
1582 	return ((cmi_hdl_t)ent->cmae_hdlp);
1583 }
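/*
 * Editorial usage sketch (not part of the original source): a successful
 * lookup returns with the reference count already bumped by
 * cmi_hdl_canref(), so callers pair it with cmi_hdl_rele():
 *
 *	cmi_hdl_t h;
 *
 *	if ((h = cmi_hdl_lookup(CMI_HDL_NEUTRAL, chipid, coreid,
 *	    strandid)) != NULL) {
 *		... use the handle ...
 *		cmi_hdl_rele(h);
 *	}
 */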
1584 
1585 cmi_hdl_t
1586 cmi_hdl_any(void)
1587 {
1588 	int i, j;
1589 	cmi_hdl_ent_t *ent;
1590 	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1591 	    cmi_strand_nbits);
1592 
1593 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1594 		if (cmi_chip_tab[i] == NULL)
1595 			continue;
1596 		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1597 		    j++, ent++) {
1598 			if (cmi_hdl_canref(ent))
1599 				return ((cmi_hdl_t)ent->cmae_hdlp);
1600 		}
1601 	}
1602 
1603 	return (NULL);
1604 }
1605 
1606 void
1607 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1608     void *arg1, void *arg2, void *arg3)
1609 {
1610 	int i, j;
1611 	cmi_hdl_ent_t *ent;
1612 	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1613 	    cmi_strand_nbits);
1614 
1615 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1616 		if (cmi_chip_tab[i] == NULL)
1617 			continue;
1618 		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1619 		    j++, ent++) {
1620 			if (cmi_hdl_canref(ent)) {
1621 				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1622 				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1623 				    == CMI_HDL_WALK_DONE) {
1624 					cmi_hdl_rele((cmi_hdl_t)hdl);
1625 					return;
1626 				}
1627 				cmi_hdl_rele((cmi_hdl_t)hdl);
1628 			}
1629 		}
1630 	}
1631 }
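/*
 * Editorial sketch (not part of the original source): a minimal walker
 * callback.  CMI_HDL_WALK_DONE is referenced above; the CMI_HDL_WALK_NEXT
 * counterpart is assumed from cpu_module.h:
 *
 *	static int
 *	count_one(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
 *	{
 *		(*(uint_t *)arg1)++;
 *		return (CMI_HDL_WALK_NEXT);
 *	}
 *
 *	uint_t n = 0;
 *	cmi_hdl_walk(count_one, &n, NULL, NULL);
 */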
1632 
1633 void
1634 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1635 {
1636 	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1637 	IMPLHDL(ophdl)->cmih_cmi = cmi;
1638 }
1639 
1640 void *
1641 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1642 {
1643 	return (IMPLHDL(ophdl)->cmih_cmi);
1644 }
1645 
1646 void *
1647 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1648 {
1649 	return (IMPLHDL(ophdl)->cmih_cmidata);
1650 }
1651 
1652 enum cmi_hdl_class
1653 cmi_hdl_class(cmi_hdl_t ophdl)
1654 {
1655 	return (IMPLHDL(ophdl)->cmih_class);
1656 }
1657 
1658 #define	CMI_HDL_OPFUNC(what, type)				\
1659 	type							\
1660 	cmi_hdl_##what(cmi_hdl_t ophdl)				\
1661 	{							\
1662 		return (HDLOPS(IMPLHDL(ophdl))->		\
1663 		    cmio_##what(IMPLHDL(ophdl)));		\
1664 	}
1665 
1666 /* BEGIN CSTYLED */
1667 CMI_HDL_OPFUNC(vendor, uint_t)
1668 CMI_HDL_OPFUNC(vendorstr, const char *)
1669 CMI_HDL_OPFUNC(family, uint_t)
1670 CMI_HDL_OPFUNC(model, uint_t)
1671 CMI_HDL_OPFUNC(stepping, uint_t)
1672 CMI_HDL_OPFUNC(chipid, uint_t)
1673 CMI_HDL_OPFUNC(procnodeid, uint_t)
1674 CMI_HDL_OPFUNC(coreid, uint_t)
1675 CMI_HDL_OPFUNC(strandid, uint_t)
1676 CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
1677 CMI_HDL_OPFUNC(strand_apicid, uint_t)
1678 CMI_HDL_OPFUNC(chiprev, x86_chiprev_t)
1679 CMI_HDL_OPFUNC(chiprevstr, const char *)
1680 CMI_HDL_OPFUNC(getsockettype, uint32_t)
1681 CMI_HDL_OPFUNC(getsocketstr, const char *)
1682 CMI_HDL_OPFUNC(logical_id, id_t)
1683 CMI_HDL_OPFUNC(smbiosid, uint16_t)
1684 CMI_HDL_OPFUNC(smb_chipid, uint_t)
1685 CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1686 CMI_HDL_OPFUNC(chipsig, uint_t)
1687 /* END CSTYLED */
1688 
1689 boolean_t
1690 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1691 {
1692 	return (IMPLHDL(ophdl)->cmih_mstrand);
1693 }
1694 
1695 void
1696 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1697 {
1698 	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1699 		return;
1700 
1701 	cmi_hdl_inj_begin(ophdl);
1702 	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1703 	cmi_hdl_inj_end(NULL);
1704 }
1705 
1706 int
1707 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1708 {
1709 	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1710 	    new_status, old_status));
1711 }
1712 
1713 #ifndef	__xpv
1714 /*
1715  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1716  */
1717 uint_t
1718 cmi_ntv_hwchipid(cpu_t *cp)
1719 {
1720 	return (cpuid_get_chipid(cp));
1721 }
1722 
1723 /*
1724  * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1725  */
1726 uint_t
1727 cmi_ntv_hwprocnodeid(cpu_t *cp)
1728 {
1729 	return (cpuid_get_procnodeid(cp));
1730 }
1731 
1732 /*
1733  * Return core instance within a single chip.
1734  */
1735 uint_t
1736 cmi_ntv_hwcoreid(cpu_t *cp)
1737 {
1738 	return (cpuid_get_pkgcoreid(cp));
1739 }
1740 
1741 /*
1742  * Return strand number within a single core.  cpuid_get_clogid numbers
1743  * all execution units (strands, or cores in unstranded models) sequentially
1744  * within a single chip.
1745  */
1746 uint_t
1747 cmi_ntv_hwstrandid(cpu_t *cp)
1748 {
1749 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1750 	    cpuid_get_ncore_per_chip(cp);
1751 
1752 	return (cpuid_get_clogid(cp) % strands_per_core);
1753 }
1754 
1755 static void
1756 cmi_ntv_hwdisable_mce_xc(void)
1757 {
1758 	ulong_t cr4;
1759 
1760 	cr4 = getcr4();
1761 	cr4 = cr4 & (~CR4_MCE);
1762 	setcr4(cr4);
1763 }
1764 
1765 void
1766 cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
1767 {
1768 	cpuset_t	set;
1769 	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
1770 	cpu_t *cp = HDLPRIV(thdl);
1771 
1772 	if (CPU->cpu_id == cp->cpu_id) {
1773 		cmi_ntv_hwdisable_mce_xc();
1774 	} else {
1775 		CPUSET_ONLY(set, cp->cpu_id);
1776 		xc_call(0, 0, 0, CPUSET2BV(set),
1777 		    (xc_func_t)cmi_ntv_hwdisable_mce_xc);
1778 	}
1779 }
1780 
1781 #endif	/* __xpv */
1782 
1783 void
1784 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1785 {
1786 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1787 
1788 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1789 }
1790 
1791 void
1792 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1793 {
1794 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1795 
1796 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1797 }
1798 
1799 cmi_errno_t
1800 cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
1801 {
1802 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1803 
1804 	/*
1805 	 * Regardless of the handle class, we first check for an
1806 	 * interposed value.  In the xVM case you probably want to
1807 	 * place interposed values within the hypervisor itself, but
1808 	 * we still allow interposing them in dom0 for test and bringup
1809 	 * purposes.
1810 	 */
1811 	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
1812 	    msri_lookup(hdl, msr, valp))
1813 		return (CMI_SUCCESS);
1814 
1815 	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
1816 		return (CMIERR_NOTSUP);
1817 
1818 	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
1819 }
1820 
1821 cmi_errno_t
1822 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1823 {
1824 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1825 
1826 	/* Invalidate any interposed value */
1827 	msri_rment(hdl, msr);
1828 
1829 	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1830 		return (CMI_SUCCESS);	/* pretend all is ok */
1831 
1832 	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1833 }
1834 
1835 void
1836 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1837 {
1838 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1839 	ulong_t cr4;
1840 
1841 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1842 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1843 		return;
1844 
1845 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1846 
1847 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1848 }
1849 
1850 void
1851 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1852 {
1853 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1854 	int i;
1855 
1856 	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1857 		return;
1858 
1859 	cmi_hdl_inj_begin(ophdl);
1860 
1861 	for (i = 0; i < nregs; i++, regs++)
1862 		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1863 		    regs->cmr_msrval);
1864 
1865 	cmi_hdl_inj_end(ophdl);
1866 }
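/*
 * Illustrative sketch (hypothetical caller, not in this file): staging an
 * interposed MSR value, e.g. for error injection.  MSR 0x401 is the
 * architectural MC0_STATUS; the value is a fabricated status pattern.
 */
static void
example_inject_msr(cmi_hdl_t hdl)
{
	cmi_mca_regs_t reg;

	reg.cmr_msrnum = 0x401;			/* IA32_MC0_STATUS */
	reg.cmr_msrval = 0xb200000000000000ULL;	/* fabricated status bits */

	/* A later cmi_hdl_rdmsr() of 0x401 can return this value. */
	cmi_hdl_msrinterpose(hdl, &reg, 1);
}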
1867 
1868 /*ARGSUSED*/
1869 void
1870 cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1871 {
1872 #ifdef __xpv
1873 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1874 	int i;
1875 
1876 	for (i = 0; i < nregs; i++, regs++)
1877 		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
1878 #endif
1879 }
1880 
1881 
1882 void
1883 cmi_pcird_nohw(void)
1884 {
1885 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
1886 }
1887 
1888 void
1889 cmi_pciwr_nohw(void)
1890 {
1891 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
1892 }
1893 
1894 static uint32_t
1895 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1896     int *interpose, ddi_acc_handle_t hdl)
1897 {
1898 	uint32_t val;
1899 
1900 	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1901 	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
1902 		if (interpose)
1903 			*interpose = 1;
1904 		return (val);
1905 	}
1906 	if (interpose)
1907 		*interpose = 0;
1908 
1909 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1910 		return (0);
1911 
1912 	switch (asz) {
1913 	case 1:
1914 		if (hdl)
1915 			val = pci_config_get8(hdl, (off_t)reg);
1916 		else
1917 			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
1918 			    reg);
1919 		break;
1920 	case 2:
1921 		if (hdl)
1922 			val = pci_config_get16(hdl, (off_t)reg);
1923 		else
1924 			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
1925 			    reg);
1926 		break;
1927 	case 4:
1928 		if (hdl)
1929 			val = pci_config_get32(hdl, (off_t)reg);
1930 		else
1931 			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
1932 			    reg);
1933 		break;
1934 	default:
1935 		val = 0;
1936 	}
1937 	return (val);
1938 }
1939 
1940 uint8_t
1941 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1942     ddi_acc_handle_t hdl)
1943 {
1944 	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1945 	    hdl));
1946 }
1947 
1948 uint16_t
1949 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1950     ddi_acc_handle_t hdl)
1951 {
1952 	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1953 	    hdl));
1954 }
1955 
1956 uint32_t
1957 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1958     ddi_acc_handle_t hdl)
1959 {
1960 	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1961 }
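/*
 * Illustrative sketch (hypothetical caller, not in this file): reading the
 * vendor ID word at config offset 0.  Bus/dev/func 0/24/3 is only an
 * example address; a NULL handle selects the pci_cfgacc_* path above.
 */
static uint16_t
example_read_vendor(void)
{
	int interpose;
	uint16_t vid;

	vid = cmi_pci_getw(0, 24, 3, 0, &interpose, NULL);
	if (interpose)
		cmn_err(CE_CONT, "?vendor ID 0x%x was interposed\n", vid);

	return (vid);
}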
1962 
1963 void
1964 cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
1965 {
1966 	pcii_addent(bus, dev, func, reg, val, 1);
1967 }
1968 
1969 void
1970 cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
1971 {
1972 	pcii_addent(bus, dev, func, reg, val, 2);
1973 }
1974 
1975 void
1976 cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
1977 {
1978 	pcii_addent(bus, dev, func, reg, val, 4);
1979 }
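/*
 * Illustrative sketch (hypothetical, not in this file): an interpose/read
 * round trip.  If interposed reads are enabled, the cmi_pci_getw() below
 * returns 0xdead from the interposition table without touching hardware;
 * the bus/dev/func and register offset 0x40 are made up.
 */
static void
example_pci_roundtrip(void)
{
	int interpose;
	uint16_t val;

	cmi_pci_interposew(0, 24, 3, 0x40, 0xdead);
	val = cmi_pci_getw(0, 24, 3, 0x40, &interpose, NULL);

	if (interpose != 0 && val == 0xdead)
		cmn_err(CE_CONT, "?interposed PCI read satisfied\n");
}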
1980 
1981 static void
1982 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1983     ddi_acc_handle_t hdl, uint32_t val)
1984 {
1985 	/*
1986 	 * If there is an interposed value for this register invalidate it.
1987 	 */
1988 	pcii_rment(bus, dev, func, reg, asz);
1989 
1990 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1991 		return;
1992 
1993 	switch (asz) {
1994 	case 1:
1995 		if (hdl)
1996 			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1997 		else
1998 			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
1999 			    (uint8_t)val);
2000 		break;
2001 
2002 	case 2:
2003 		if (hdl)
2004 			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
2005 		else
2006 			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
2007 			    (uint16_t)val);
2008 		break;
2009 
2010 	case 4:
2011 		if (hdl)
2012 			pci_config_put32(hdl, (off_t)reg, val);
2013 		else
2014 			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
2015 			    val);
2016 		break;
2017 
2018 	default:
2019 		break;
2020 	}
2021 }
2022 
2023 void
2024 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
2025     uint8_t val)
2026 {
2027 	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
2028 }
2029 
2030 void
2031 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
2032     uint16_t val)
2033 {
2034 	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
2035 }
2036 
2037 void
2038 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
2039     uint32_t val)
2040 {
2041 	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
2042 }
2043 
2044 cmi_errno_t
2045 cmi_cache_ncaches(cmi_hdl_t hdl, uint32_t *ncache)
2046 {
2047 	return (HDLOPS(IMPLHDL(hdl))->cmio_ncache(IMPLHDL(hdl), ncache));
2048 }
2049 
2050 
2051 cmi_errno_t
2052 cmi_cache_info(cmi_hdl_t hdl, uint32_t cno, x86_cache_t *cachep)
2053 {
2054 	return (HDLOPS(IMPLHDL(hdl))->cmio_cache(IMPLHDL(hdl), cno, cachep));
2055 }
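/*
 * Illustrative sketch (hypothetical caller, not in this file): enumerating
 * a CPU's caches through the two wrappers above.
 */
static void
example_walk_caches(cmi_hdl_t hdl)
{
	uint32_t ncache, i;
	x86_cache_t cache;

	if (cmi_cache_ncaches(hdl, &ncache) != CMI_SUCCESS)
		return;

	for (i = 0; i < ncache; i++) {
		if (cmi_cache_info(hdl, i, &cache) == CMI_SUCCESS) {
			/* cache now describes cache number i */
		}
	}
}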
2056 
2057 static const struct cmi_hdl_ops cmi_hdl_ops = {
2058 #ifdef __xpv
2059 	/*
2060 	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
2061 	 */
2062 	xpv_vendor,		/* cmio_vendor */
2063 	xpv_vendorstr,		/* cmio_vendorstr */
2064 	xpv_family,		/* cmio_family */
2065 	xpv_model,		/* cmio_model */
2066 	xpv_stepping,		/* cmio_stepping */
2067 	xpv_chipid,		/* cmio_chipid */
2068 	xpv_procnodeid,		/* cmio_procnodeid */
2069 	xpv_coreid,		/* cmio_coreid */
2070 	xpv_strandid,		/* cmio_strandid */
2071 	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
2072 	xpv_strand_apicid,	/* cmio_strand_apicid */
2073 	xpv_chiprev,		/* cmio_chiprev */
2074 	xpv_chiprevstr,		/* cmio_chiprevstr */
2075 	xpv_getsockettype,	/* cmio_getsockettype */
2076 	xpv_getsocketstr,	/* cmio_getsocketstr */
2077 	xpv_chipsig,		/* cmio_chipsig */
2078 	xpv_ncache,		/* cmio_ncache */
2079 	xpv_cache,		/* cmio_cache */
2080 	xpv_logical_id,		/* cmio_logical_id */
2081 	NULL,			/* cmio_getcr4 */
2082 	NULL,			/* cmio_setcr4 */
2083 	xpv_rdmsr,		/* cmio_rdmsr */
2084 	xpv_wrmsr,		/* cmio_wrmsr */
2085 	xpv_msrinterpose,	/* cmio_msrinterpose */
2086 	xpv_int,		/* cmio_int */
2087 	xpv_online,		/* cmio_online */
2088 	xpv_smbiosid,		/* cmio_smbiosid */
2089 	xpv_smb_chipid,		/* cmio_smb_chipid */
2090 	xpv_smb_bboard		/* cmio_smb_bboard */
2091 
2092 #else	/* __xpv */
2093 
2094 	/*
2095 	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
2096 	 */
2097 	ntv_vendor,		/* cmio_vendor */
2098 	ntv_vendorstr,		/* cmio_vendorstr */
2099 	ntv_family,		/* cmio_family */
2100 	ntv_model,		/* cmio_model */
2101 	ntv_stepping,		/* cmio_stepping */
2102 	ntv_chipid,		/* cmio_chipid */
2103 	ntv_procnodeid,		/* cmio_procnodeid */
2104 	ntv_coreid,		/* cmio_coreid */
2105 	ntv_strandid,		/* cmio_strandid */
2106 	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
2107 	ntv_strand_apicid,	/* cmio_strand_apicid */
2108 	ntv_chiprev,		/* cmio_chiprev */
2109 	ntv_chiprevstr,		/* cmio_chiprevstr */
2110 	ntv_getsockettype,	/* cmio_getsockettype */
2111 	ntv_getsocketstr,	/* cmio_getsocketstr */
2112 	ntv_chipsig,		/* cmio_chipsig */
2113 	ntv_ncache,		/* cmio_ncache */
2114 	ntv_cache,		/* cmio_cache */
2115 	ntv_logical_id,		/* cmio_logical_id */
2116 	ntv_getcr4,		/* cmio_getcr4 */
2117 	ntv_setcr4,		/* cmio_setcr4 */
2118 	ntv_rdmsr,		/* cmio_rdmsr */
2119 	ntv_wrmsr,		/* cmio_wrmsr */
2120 	ntv_msrinterpose,	/* cmio_msrinterpose */
2121 	ntv_int,		/* cmio_int */
2122 	ntv_online,		/* cmio_online */
2123 	ntv_smbiosid,		/* cmio_smbiosid */
2124 	ntv_smb_chipid,		/* cmio_smb_chipid */
2125 	ntv_smb_bboard		/* cmio_smb_bboard */
2126 #endif
2127 };
2128