/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2022 Oxide Computer Co.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);
static void pwrnow_stop(cpu_t *);

static boolean_t pwrnow_cpb_supported(void);

/*
 * Interfaces for modules implementing AMD's PowerNow!.
 */
cpupm_state_ops_t pwrnow_ops = {
	"PowerNow! Technology",
	pwrnow_init,
	pwrnow_fini,
	pwrnow_power,
	pwrnow_stop
};

/*
 * Error returns
 */
#define	PWRNOW_RET_SUCCESS		0x00
#define	PWRNOW_RET_NO_PM		0x01
#define	PWRNOW_RET_UNSUP_STATE		0x02
#define	PWRNOW_RET_TRANS_INCOMPLETE	0x03

#define	PWRNOW_LATENCY_WAIT		10

/*
 * MSR registers for changing and reading processor power state.
 */
#define	PWRNOW_PERF_CTL_MSR		0xC0010062
#define	PWRNOW_PERF_STATUS_MSR		0xC0010063

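/*
 * Feature bits reported in %edx of CPUID leaf 0x80000007 (Advanced Power
 * Management Information), queried by pwrnow_supported() and
 * pwrnow_cpb_supported() below.
 */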
#define	AMD_CPUID_PSTATE_HARDWARE	(1<<7)
#define	AMD_CPUID_TSC_CONSTANT		(1<<8)
#define	AMD_CPUID_CPB			(1<<9)

/*
 * Debugging support
 */
#ifdef	DEBUG
volatile int pwrnow_debug = 0;
#define	PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define	PWRNOW_DEBUG(arglist)
#endif

/*
 * Write the ctrl register.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = ctrl;
		wrmsr(PWRNOW_PERF_CTL_MSR, reg);
		break;

	default:
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
static int
pwrnow_pstate_transition(xc_arg_t arg1, xc_arg_t arg2 __unused,
    xc_arg_t arg3 __unused)
{
	uint32_t req_state = (uint32_t)arg1;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

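	/* Index the requested entry in the cached ACPI P-state (_PSS) table. */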
	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor p-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

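	/* Update turbo (Core Performance Boost) statistics, if supported. */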
	if (mach_state->ms_turbo != NULL)
		cpupm_record_turbo_info(mach_state->ms_turbo,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
	return (0);
}

static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
	/*
	 * If the current thread is running on one of the target CPUs,
	 * make the transition request directly and drop that CPU from
	 * the set. Any CPUs remaining in the set are reached with a
	 * cross-call.
	 */
	kpreempt_disable();
	if (CPU_IN_SET(set, CPU->cpu_id)) {
		(void) pwrnow_pstate_transition(req_state, 0, 0);
		CPUSET_DEL(set, CPU->cpu_id);
	}
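	/* Cross-call the remaining CPUs in the set to transition themselves. */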
	if (!CPUSET_ISNULL(set)) {
		xc_call((xc_arg_t)req_state, 0, 0,
		    CPUSET2BV(set), pwrnow_pstate_transition);
	}
	kpreempt_enable();
}

/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	static int logged = 0;

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		if (!logged) {
			cmn_err(CE_NOTE, "!PowerNow! support is being "
			    "disabled due to errors parsing ACPI P-state "
			    "objects exported by BIOS.");
			logged = 1;
		}
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

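	/* Place this CPU in its P-state coordination domain (from ACPI _PSD). */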
	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	/*
	 * Check for Core Performance Boost support
	 */
	if (pwrnow_cpb_supported())
		mach_state->ms_turbo = cpupm_turbo_init(cp);

	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
	return (PWRNOW_RET_SUCCESS);
}

/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}

boolean_t
pwrnow_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
	if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	/*
	 * We currently only support CPU power management of
	 * processors that are P-state TSC invariant.
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
		PWRNOW_DEBUG(("No support for CPUs that are not P-state "
		    "TSC invariant.\n"));
		return (B_FALSE);
	}

	/*
	 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
	 * single MSR write to change speed).
	 */
	if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
		PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
		return (B_FALSE);
	}
	return (B_TRUE);
}

static boolean_t
pwrnow_cpb_supported(void)
{
	struct cpuid_regs cpu_regs;

	/* Required features */
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
	if (!is_x86_feature(x86_featureset, X86FSET_MSR)) {
		PWRNOW_DEBUG(("No CPUID or MSR support."));
		return (B_FALSE);
	}

	/*
	 * Get the Advanced Power Management Information.
	 */
	cpu_regs.cp_eax = 0x80000007;
	(void) __cpuid_insn(&cpu_regs);

	if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
		return (B_FALSE);

	return (B_TRUE);
}

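/*
 * Tear down PowerNow! state for a CPU leaving power management: detach it
 * from its P-state domain and release the cached ACPI P-state data and any
 * turbo bookkeeping.
 */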
static void
pwrnow_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}