xref: /linux/drivers/acpi/processor_thermal.c (revision ab520be8cd5d56867fc95cfbc34b90880faf1f9d)
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/*
 * If a passive cooling situation is detected, CPUfreq is used as the first
 * cooling mechanism, since in most cases it offers voltage scaling in
 * addition to frequency scaling, and thus a cubic (rather than linear)
 * reduction of energy. Also, we allow for _any_ cpufreq driver, not only
 * the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
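
/*
 * Each cooling step cuts the allowed maximum frequency by another 20% of
 * cpuinfo.max_freq, so steps 0..3 correspond to 100%, 80%, 60% and 40% of
 * the CPU's maximum frequency (see acpi_thermal_cpufreq_notifier()).
 */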

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per cpu data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on CPU hot-unplug; in that case we forget the
 * state temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

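/*
 * Report whether cpufreq is usable for @cpu: the policy notifier must have
 * been registered and a cpufreq driver must actually manage this CPU.
 */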
static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}

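/*
 * cpufreq policy notifier: on CPUFREQ_ADJUST, clamp the policy's maximum
 * frequency according to the package's current cooling step. For example,
 * with cpuinfo.max_freq == 3000000 kHz and a reduction step of 2, the
 * allowed maximum becomes 3000000 * (100 - 40) / 100 == 1800000 kHz.
 */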
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (policy->cpuinfo.max_freq *
		    (100 - reduction_pctg(policy->cpu) * 20)) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

out:
	return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};

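/* Number of cpufreq-based cooling steps available for @cpu (0 if none). */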
static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}

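/* Current cpufreq-based cooling step of the package containing @cpu. */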
static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return reduction_pctg(cpu);
}

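/* Record the new cooling step for @cpu's package and apply it via cpufreq. */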
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	int i;

	if (!cpu_has_cpufreq(cpu))
		return 0;

	reduction_pctg(cpu) = state;

	/*
	 * Update all the CPUs in the same package because they all
	 * contribute to the temperature and often share the same
	 * frequency.
	 */
	for_each_online_cpu(i) {
		if (topology_physical_package_id(i) ==
		    topology_physical_package_id(cpu))
			cpufreq_update_policy(i);
	}
	return 0;
}

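/* Register the cpufreq policy notifier that enforces the cooling limits. */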
void acpi_thermal_cpufreq_init(void)
{
	int ret;

	ret = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
					CPUFREQ_POLICY_NOTIFIER);
	if (!ret)
		acpi_thermal_cpufreq_is_init = 1;
}

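/* Unregister the policy notifier and disable cpufreq-based cooling. */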
void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier(&acpi_thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}

#else				/* ! CONFIG_CPU_FREQ */
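/*
 * Without cpufreq the processor cooling device cannot scale frequency, so
 * these stubs report zero cpufreq-based cooling states and only T-state
 * throttling (if supported) remains available.
 */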
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
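/*
 * A processor's cooling states are the cpufreq reduction steps (0-3 when
 * cpufreq is available) followed by the ACPI throttling states beyond T0,
 * so the deepest state is cpufreq steps + (throttling state count - 1).
 */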
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four states according to
	 * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

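/* thermal_cooling_device_ops::get_max_state: report the deepest cooling state. */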
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

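/*
 * thermal_cooling_device_ops::get_cur_state: the current cooling state is
 * the package's cpufreq reduction step plus the current throttling state.
 */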
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

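/*
 * thermal_cooling_device_ops::set_cur_state: states up to the number of
 * cpufreq steps are handled purely by lowering the frequency limit; deeper
 * states additionally engage ACPI T-state throttling for the remainder.
 */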
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
273