/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

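/**
 * smu_set_soft_freq_range - set the soft min/max frequency range of a clock domain
 *
 * @smu: smu_context pointer
 * @clk_type: the clock domain to constrain
 * @min: minimum allowed frequency (MHz)
 * @max: maximum allowed frequency (MHz)
 *
 * Returns 0 on success (or when the ASIC does not implement the hook),
 * error on failure.
 */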
int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

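/**
 * smu_get_dpm_freq_range - query the ultimate frequency range of a clock domain
 *
 * @smu: smu_context pointer
 * @clk_type: the clock domain to query
 * @min: optional output for the minimum supported frequency
 * @max: optional output for the maximum supported frequency
 *
 * Either @min or @max may be NULL, but not both. Returns 0 on success,
 * -ENOTSUPP when the ASIC does not implement the hook.
 */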
int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

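/*
 * Returns false if a VCN or JPEG IP block instance is present but was not
 * brought up (status.valid is false), true otherwise.
 */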
static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
			adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
			!adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * Don't power on VCN/JPEG when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

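	/*
	 * vcn_gated is 1 while VCN is gated. The XOR below is true exactly
	 * when the block is already in the requested power state, in which
	 * case the SMU message can be skipped.
	 */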
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    which guarantee the call is race condition free.
 * 2. Or it gets called on a user setting request of
 *    power_dpm_force_performance_level. In that case, the smu->mutex lock
 *    protection is already enforced on the parent API
 *    smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
					smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; just report one default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

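/*
 * Retrieving the default DPM clock tables requires VCN/JPEG to be powered
 * up. Ungate them here temporarily and restore the original gating state
 * afterwards.
 */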
static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may boot the ASIC with a different mode.
	 * For those supporting ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

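/*
 * Allocate the BOs backing the SMU tables: a dedicated one for the
 * PMSTATUSLOG (tool) table and a single shared driver table sized to
 * the largest remaining SMC table.
 */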
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use. The SetSystemVirtualDramAddr
 * and DramLogSetDramAddr msgs can notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/*
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill in the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT))
		smu->plpd_mode = XGMI_PLPD_DEFAULT;
	else
		smu->plpd_mode = XGMI_PLPD_NONE;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

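	/*
	 * Default workload profile priorities: workload_prority maps a
	 * profile to its priority, while workload_setting is the inverse,
	 * priority-indexed table.
	 */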
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

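/*
 * Query the ASIC thermal limits from the ppt backend and mirror them
 * into adev->pm.dpm.thermal for the rest of the power code to consume.
 */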
static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sorting and cleanup to eliminate those holes and
	 * duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls the relevant amdgpu function in response to a wbrf event
 * notification from the kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * The event flood is over and the driver will consume the latest
 * exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Checks via the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
							acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if the wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifiband exclusion ranges may already be in place before
	 * our driver loads. Schedule a run to make sure our driver is
	 * aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to
	 * notify the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow if wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (got from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, etc. and enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
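	/* e.g. a GEN4-capable x16 link is reported as pcie_gen = 3, pcie_width = 6 */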
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support. Properly setup the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

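/*
 * Bring up the SMC: load the SMU microcode if the driver rather than PSP
 * is responsible for it, verify the firmware is up, and check that the
 * driver/firmware interface versions match.
 */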
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to DRIVER_IF_VERSION of smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	/*
	 * Check whether wbrf is supported. This needs to be done
	 * before SMU setup starts since part of SMU configuration
	 * relies on this.
	 */
	smu_wbrf_support_check(smu);

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_dpm_set_vpe_enable(smu, true);
		smu_dpm_set_umsch_mm_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Move maximum sustainable clock retrieval here considering:
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

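/*
 * Disable the DPM features on hw cleanup. Several ASICs and scenarios
 * (BACO entry, SCPM, custom pptable upload on Navi1x, gpu reset/S0i3)
 * are special-cased below because PMFW either handles the disablement
 * itself or does not permit driver involvement.
 */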
1813 static int smu_disable_dpms(struct smu_context *smu)
1814 {
1815 	struct amdgpu_device *adev = smu->adev;
1816 	int ret = 0;
1817 	bool use_baco = !smu->is_apu &&
1818 		((amdgpu_in_reset(adev) &&
1819 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1820 		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1821 
1822 	/*
1823 	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others)
1824 	 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1825 	 */
1826 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1827 	case IP_VERSION(13, 0, 0):
1828 	case IP_VERSION(13, 0, 7):
1829 	case IP_VERSION(13, 0, 10):
1830 		return 0;
1831 	default:
1832 		break;
1833 	}
1834 
1835 	/*
1836 	 * For custom pptable uploading, skip the DPM features
1837 	 * disable process on Navi1x ASICs.
1838 	 *   - The gfx related features are under the control of
1839 	 *     RLC on those ASICs. RLC reinitialization would be
1840 	 *     needed to reenable them, which costs much more
1841 	 *     effort.
1842 	 *
1843 	 *   - SMU firmware can handle the DPM reenablement
1844 	 *     properly.
1845 	 */
1846 	if (smu->uploading_custom_pp_table) {
1847 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1848 		case IP_VERSION(11, 0, 0):
1849 		case IP_VERSION(11, 0, 5):
1850 		case IP_VERSION(11, 0, 9):
1851 		case IP_VERSION(11, 0, 7):
1852 		case IP_VERSION(11, 0, 11):
1853 		case IP_VERSION(11, 5, 0):
1854 		case IP_VERSION(11, 0, 12):
1855 		case IP_VERSION(11, 0, 13):
1856 			return 0;
1857 		default:
1858 			break;
1859 		}
1860 	}
1861 
1862 	/*
1863 	 * For Sienna_Cichlid and the other ASICs below, PMFW handles the features
1864 	 * disablement properly on BACO entry. Driver involvement is unnecessary.
1865 	 */
1866 	if (use_baco) {
1867 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1868 		case IP_VERSION(11, 0, 7):
1869 		case IP_VERSION(11, 0, 0):
1870 		case IP_VERSION(11, 0, 5):
1871 		case IP_VERSION(11, 0, 9):
1872 		case IP_VERSION(13, 0, 7):
1873 			return 0;
1874 		default:
1875 			break;
1876 		}
1877 	}
1878 
1879 	/*
1880 	 * For SMU 13.0.4, 13.0.11 and 14.0.0, PMFW handles the features disablement
1881 	 * properly for the gpu reset and S0i3 cases. Driver involvement is unnecessary.
1882 	 */
1883 	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
1884 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1885 		case IP_VERSION(13, 0, 4):
1886 		case IP_VERSION(13, 0, 11):
1887 		case IP_VERSION(14, 0, 0):
1888 			return 0;
1889 		default:
1890 			break;
1891 		}
1892 	}
1893 
1894 	/*
1895 	 * For gpu reset, runpm and hibernation through BACO,
1896 	 * the BACO feature has to be kept enabled.
1897 	 */
1898 	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1899 		ret = smu_disable_all_features_with_exception(smu,
1900 							      SMU_FEATURE_BACO_BIT);
1901 		if (ret)
1902 			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1903 	} else {
1904 		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1905 		if (!adev->scpm_enabled) {
1906 			ret = smu_system_features_control(smu, false);
1907 			if (ret)
1908 				dev_err(adev->dev, "Failed to disable smu features.\n");
1909 		}
1910 	}
1911 
1912 	/* Notify SMU that RLC is going to be off and stop RLC/SMU interaction;
1913 	 * otherwise SMU will hang while interacting with RLC if RLC is halted.
1914 	 * This is a workaround for the Vangogh asic which fixes the SMU hang issue.
1915 	 */
1916 	ret = smu_notify_rlc_state(smu, false);
1917 	if (ret) {
1918 		dev_err(adev->dev, "Fail to notify rlc status!\n");
1919 		return ret;
1920 	}
1921 
1922 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1923 	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1924 	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1925 		adev->gfx.rlc.funcs->stop(adev);
1926 
1927 	return ret;
1928 }
1929 
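/*
 * Quiesce the SMC: flush the pending throttling/interrupt work, disable
 * thermal alerts and finally disable the DPM features.
 */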
1930 static int smu_smc_hw_cleanup(struct smu_context *smu)
1931 {
1932 	struct amdgpu_device *adev = smu->adev;
1933 	int ret = 0;
1934 
1935 	smu_wbrf_fini(smu);
1936 
1937 	cancel_work_sync(&smu->throttling_logging_work);
1938 	cancel_work_sync(&smu->interrupt_work);
1939 
1940 	ret = smu_disable_thermal_alert(smu);
1941 	if (ret) {
1942 		dev_err(adev->dev, "Fail to disable thermal alert!\n");
1943 		return ret;
1944 	}
1945 
1946 	cancel_delayed_work_sync(&smu->swctf_delayed_work);
1947 
1948 	ret = smu_disable_dpms(smu);
1949 	if (ret) {
1950 		dev_err(adev->dev, "Fail to disable dpm features!\n");
1951 		return ret;
1952 	}
1953 
1954 	return 0;
1955 }
1956 
1957 static int smu_hw_fini(void *handle)
1958 {
1959 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1960 	struct smu_context *smu = adev->powerplay.pp_handle;
1961 
1962 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1963 		return 0;
1964 
1965 	smu_dpm_set_vcn_enable(smu, false);
1966 	smu_dpm_set_jpeg_enable(smu, false);
1967 	smu_dpm_set_vpe_enable(smu, false);
1968 	smu_dpm_set_umsch_mm_enable(smu, false);
1969 
1970 	adev->vcn.cur_state = AMD_PG_STATE_GATE;
1971 	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
1972 
1973 	if (!smu->pm_enabled)
1974 		return 0;
1975 
1976 	adev->pm.dpm_enabled = false;
1977 
1978 	return smu_smc_hw_cleanup(smu);
1979 }
1980 
1981 static void smu_late_fini(void *handle)
1982 {
1983 	struct amdgpu_device *adev = handle;
1984 	struct smu_context *smu = adev->powerplay.pp_handle;
1985 
1986 	kfree(smu);
1987 }
1988 
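/* Full SMU reset: tear down, then re-run the hw_init and late_init paths. */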
1989 static int smu_reset(struct smu_context *smu)
1990 {
1991 	struct amdgpu_device *adev = smu->adev;
1992 	int ret;
1993 
1994 	ret = smu_hw_fini(adev);
1995 	if (ret)
1996 		return ret;
1997 
1998 	ret = smu_hw_init(adev);
1999 	if (ret)
2000 		return ret;
2001 
2002 	ret = smu_late_init(adev);
2003 	if (ret)
2004 		return ret;
2005 
2006 	return 0;
2007 }
2008 
2009 static int smu_suspend(void *handle)
2010 {
2011 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2012 	struct smu_context *smu = adev->powerplay.pp_handle;
2013 	int ret;
2014 	uint64_t count;
2015 
2016 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2017 		return 0;
2018 
2019 	if (!smu->pm_enabled)
2020 		return 0;
2021 
2022 	adev->pm.dpm_enabled = false;
2023 
2024 	ret = smu_smc_hw_cleanup(smu);
2025 	if (ret)
2026 		return ret;
2027 
2028 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2029 
2030 	smu_set_gfx_cgpg(smu, false);
2031 
2032 	/*
2033 	 * PMFW resets the gfxoff entry count when the device is suspended,
2034 	 * so save the last value to be used on resume to keep it consistent.
2035 	 */
2036 	ret = smu_get_entrycount_gfxoff(smu, &count);
2037 	if (!ret)
2038 		adev->gfx.gfx_off_entrycount = count;
2039 
2040 	return 0;
2041 }
2042 
2043 static int smu_resume(void *handle)
2044 {
2045 	int ret;
2046 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2047 	struct smu_context *smu = adev->powerplay.pp_handle;
2048 
2049 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2050 		return 0;
2051 
2052 	if (!smu->pm_enabled)
2053 		return 0;
2054 
2055 	dev_info(adev->dev, "SMU is resuming...\n");
2056 
2057 	ret = smu_start_smc_engine(smu);
2058 	if (ret) {
2059 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
2060 		return ret;
2061 	}
2062 
2063 	ret = smu_smc_hw_setup(smu);
2064 	if (ret) {
2065 		dev_err(adev->dev, "Failed to setup smc hw!\n");
2066 		return ret;
2067 	}
2068 
2069 	ret = smu_set_gfx_imu_enable(smu);
2070 	if (ret)
2071 		return ret;
2072 
2073 	smu_set_gfx_cgpg(smu, true);
2074 
2075 	smu->disable_uclk_switch = 0;
2076 
2077 	adev->pm.dpm_enabled = true;
2078 
2079 	dev_info(adev->dev, "SMU is resumed successfully!\n");
2080 
2081 	return 0;
2082 }
2083 
2084 static int smu_display_configuration_change(void *handle,
2085 					    const struct amd_pp_display_configuration *display_config)
2086 {
2087 	struct smu_context *smu = handle;
2088 
2089 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2090 		return -EOPNOTSUPP;
2091 
2092 	if (!display_config)
2093 		return -EINVAL;
2094 
2095 	smu_set_min_dcef_deep_sleep(smu,
2096 				    display_config->min_dcef_deep_sleep_set_clk / 100);
2097 
2098 	return 0;
2099 }
2100 
2101 static int smu_set_clockgating_state(void *handle,
2102 				     enum amd_clockgating_state state)
2103 {
2104 	return 0;
2105 }
2106 
2107 static int smu_set_powergating_state(void *handle,
2108 				     enum amd_powergating_state state)
2109 {
2110 	return 0;
2111 }
2112 
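/*
 * UMD pstate locks the asic into a stable profiling state. On entry the
 * current DPM level is saved and power-saving features (GPO, gfx ULV,
 * deep sleep) are disabled so they cannot perturb measurements; on exit
 * those features and the saved level are restored.
 */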
2113 static int smu_enable_umd_pstate(void *handle,
2114 		      enum amd_dpm_forced_level *level)
2115 {
2116 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2117 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2118 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2119 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2120 
2121 	struct smu_context *smu = (struct smu_context *)handle;
2122 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2123 
2124 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2125 		return -EINVAL;
2126 
2127 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2128 		/* enter umd pstate, save current level, disable gfx cg */
2129 		if (*level & profile_mode_mask) {
2130 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2131 			smu_gpo_control(smu, false);
2132 			smu_gfx_ulv_control(smu, false);
2133 			smu_deep_sleep_control(smu, false);
2134 			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2135 		}
2136 	} else {
2137 		/* exit umd pstate, restore level, enable gfx cg */
2138 		if (!(*level & profile_mode_mask)) {
2139 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2140 				*level = smu_dpm_ctx->saved_dpm_level;
2141 			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2142 			smu_deep_sleep_control(smu, true);
2143 			smu_gfx_ulv_control(smu, true);
2144 			smu_gpo_control(smu, true);
2145 		}
2146 	}
2147 
2148 	return 0;
2149 }
2150 
2151 static int smu_bump_power_profile_mode(struct smu_context *smu,
2152 					   long *param,
2153 					   uint32_t param_size)
2154 {
2155 	int ret = 0;
2156 
2157 	if (smu->ppt_funcs->set_power_profile_mode)
2158 		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2159 
2160 	return ret;
2161 }
2162 
2163 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2164 				   enum amd_dpm_forced_level level,
2165 				   bool skip_display_settings)
2166 {
2167 	int ret = 0;
2168 	int index = 0;
2169 	long workload;
2170 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2171 
2172 	if (!skip_display_settings) {
2173 		ret = smu_display_config_changed(smu);
2174 		if (ret) {
2175 			dev_err(smu->adev->dev, "Failed to change display config!");
2176 			return ret;
2177 		}
2178 	}
2179 
2180 	ret = smu_apply_clocks_adjust_rules(smu);
2181 	if (ret) {
2182 		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2183 		return ret;
2184 	}
2185 
2186 	if (!skip_display_settings) {
2187 		ret = smu_notify_smc_display_config(smu);
2188 		if (ret) {
2189 			dev_err(smu->adev->dev, "Failed to notify smc display config!");
2190 			return ret;
2191 		}
2192 	}
2193 
2194 	if (smu_dpm_ctx->dpm_level != level) {
2195 		ret = smu_asic_set_performance_level(smu, level);
2196 		if (ret) {
2197 			dev_err(smu->adev->dev, "Failed to set performance level!");
2198 			return ret;
2199 		}
2200 
2201 		/* update the saved copy */
2202 		smu_dpm_ctx->dpm_level = level;
2203 	}
2204 
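	/*
	 * Select the effective power profile: fls() yields the highest set
	 * bit in workload_mask, i.e. the highest-priority profile still
	 * requested; fall back to entry 0 when the mask is empty.
	 */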
2205 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2206 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2207 		index = fls(smu->workload_mask);
2208 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2209 		workload = smu->workload_setting[index];
2210 
2211 		if (smu->power_profile_mode != workload)
2212 			smu_bump_power_profile_mode(smu, &workload, 0);
2213 	}
2214 
2215 	return ret;
2216 }
2217 
2218 static int smu_handle_task(struct smu_context *smu,
2219 			   enum amd_dpm_forced_level level,
2220 			   enum amd_pp_task task_id)
2221 {
2222 	int ret = 0;
2223 
2224 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2225 		return -EOPNOTSUPP;
2226 
2227 	switch (task_id) {
2228 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2229 		ret = smu_pre_display_config_changed(smu);
2230 		if (ret)
2231 			return ret;
2232 		ret = smu_adjust_power_state_dynamic(smu, level, false);
2233 		break;
2234 	case AMD_PP_TASK_COMPLETE_INIT:
2235 	case AMD_PP_TASK_READJUST_POWER_STATE:
2236 		ret = smu_adjust_power_state_dynamic(smu, level, true);
2237 		break;
2238 	default:
2239 		break;
2240 	}
2241 
2242 	return ret;
2243 }
2244 
2245 static int smu_handle_dpm_task(void *handle,
2246 			       enum amd_pp_task task_id,
2247 			       enum amd_pm_state_type *user_state)
2248 {
2249 	struct smu_context *smu = handle;
2250 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2251 
2252 	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2254 }
2255 
2256 static int smu_switch_power_profile(void *handle,
2257 				    enum PP_SMC_POWER_PROFILE type,
2258 				    bool en)
2259 {
2260 	struct smu_context *smu = handle;
2261 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2262 	long workload;
2263 	uint32_t index;
2264 
2265 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2266 		return -EOPNOTSUPP;
2267 
2268 	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2269 		return -EINVAL;
2270 
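	/*
	 * Each profile type owns one bit in workload_mask (its position given
	 * by workload_prority[type]); enabling sets the bit, disabling clears
	 * it, and the highest bit still set selects the profile to apply.
	 */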
2271 	if (!en) {
2272 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
2273 		index = fls(smu->workload_mask);
2274 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2275 		workload = smu->workload_setting[index];
2276 	} else {
2277 		smu->workload_mask |= (1 << smu->workload_prority[type]);
2278 		index = fls(smu->workload_mask);
2279 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2280 		workload = smu->workload_setting[index];
2281 	}
2282 
2283 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2284 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2285 		smu_bump_power_profile_mode(smu, &workload, 0);
2286 
2287 	return 0;
2288 }
2289 
2290 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2291 {
2292 	struct smu_context *smu = handle;
2293 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2294 
2295 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2296 		return -EOPNOTSUPP;
2297 
2298 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2299 		return -EINVAL;
2300 
2301 	return smu_dpm_ctx->dpm_level;
2302 }
2303 
2304 static int smu_force_performance_level(void *handle,
2305 				       enum amd_dpm_forced_level level)
2306 {
2307 	struct smu_context *smu = handle;
2308 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2309 	int ret = 0;
2310 
2311 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2312 		return -EOPNOTSUPP;
2313 
2314 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2315 		return -EINVAL;
2316 
2317 	ret = smu_enable_umd_pstate(smu, &level);
2318 	if (ret)
2319 		return ret;
2320 
2321 	ret = smu_handle_task(smu, level,
2322 			      AMD_PP_TASK_READJUST_POWER_STATE);
2323 
2324 	/* reset user dpm clock state */
2325 	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2326 		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2327 		smu->user_dpm_profile.clk_dependency = 0;
2328 	}
2329 
2330 	return ret;
2331 }
2332 
2333 static int smu_set_display_count(void *handle, uint32_t count)
2334 {
2335 	struct smu_context *smu = handle;
2336 
2337 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2338 		return -EOPNOTSUPP;
2339 
2340 	return smu_init_display_count(smu, count);
2341 }
2342 
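/*
 * Restrict @clk_type to the DPM levels selected in @mask; the exact bit
 * semantics are left to the asic-specific force_clk_levels handler.
 * Only honored in the manual performance level mode.
 */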
2343 static int smu_force_smuclk_levels(struct smu_context *smu,
2344 			 enum smu_clk_type clk_type,
2345 			 uint32_t mask)
2346 {
2347 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2348 	int ret = 0;
2349 
2350 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2351 		return -EOPNOTSUPP;
2352 
2353 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2354 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2355 		return -EINVAL;
2356 	}
2357 
2358 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2359 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2360 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2361 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
2362 			smu_set_user_clk_dependencies(smu, clk_type);
2363 		}
2364 	}
2365 
2366 	return ret;
2367 }
2368 
2369 static int smu_force_ppclk_levels(void *handle,
2370 				  enum pp_clock_type type,
2371 				  uint32_t mask)
2372 {
2373 	struct smu_context *smu = handle;
2374 	enum smu_clk_type clk_type;
2375 
2376 	switch (type) {
2377 	case PP_SCLK:
2378 		clk_type = SMU_SCLK; break;
2379 	case PP_MCLK:
2380 		clk_type = SMU_MCLK; break;
2381 	case PP_PCIE:
2382 		clk_type = SMU_PCIE; break;
2383 	case PP_SOCCLK:
2384 		clk_type = SMU_SOCCLK; break;
2385 	case PP_FCLK:
2386 		clk_type = SMU_FCLK; break;
2387 	case PP_DCEFCLK:
2388 		clk_type = SMU_DCEFCLK; break;
2389 	case PP_VCLK:
2390 		clk_type = SMU_VCLK; break;
2391 	case PP_VCLK1:
2392 		clk_type = SMU_VCLK1; break;
2393 	case PP_DCLK:
2394 		clk_type = SMU_DCLK; break;
2395 	case PP_DCLK1:
2396 		clk_type = SMU_DCLK1; break;
2397 	case OD_SCLK:
2398 		clk_type = SMU_OD_SCLK; break;
2399 	case OD_MCLK:
2400 		clk_type = SMU_OD_MCLK; break;
2401 	case OD_VDDC_CURVE:
2402 		clk_type = SMU_OD_VDDC_CURVE; break;
2403 	case OD_RANGE:
2404 		clk_type = SMU_OD_RANGE; break;
2405 	default:
2406 		return -EINVAL;
2407 	}
2408 
2409 	return smu_force_smuclk_levels(smu, clk_type, mask);
2410 }
2411 
2412 /*
2413  * On system suspend or reset, the dpm_enabled flag is cleared so
2414  * that the SMU services which are not supported in those states
2415  * get gated.
2416  * However, setting the mp1 state should still be allowed even
2417  * with dpm_enabled cleared.
2418  */
2419 static int smu_set_mp1_state(void *handle,
2420 			     enum pp_mp1_state mp1_state)
2421 {
2422 	struct smu_context *smu = handle;
2423 	int ret = 0;
2424 
2425 	if (!smu->pm_enabled)
2426 		return -EOPNOTSUPP;
2427 
2428 	if (smu->ppt_funcs &&
2429 	    smu->ppt_funcs->set_mp1_state)
2430 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2431 
2432 	return ret;
2433 }
2434 
2435 static int smu_set_df_cstate(void *handle,
2436 			     enum pp_df_cstate state)
2437 {
2438 	struct smu_context *smu = handle;
2439 	int ret = 0;
2440 
2441 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2442 		return -EOPNOTSUPP;
2443 
2444 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2445 		return 0;
2446 
2447 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
2448 	if (ret)
2449 		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2450 
2451 	return ret;
2452 }
2453 
2454 int smu_write_watermarks_table(struct smu_context *smu)
2455 {
2456 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2457 		return -EOPNOTSUPP;
2458 
2459 	return smu_set_watermarks_table(smu, NULL);
2460 }
2461 
2462 static int smu_set_watermarks_for_clock_ranges(void *handle,
2463 					       struct pp_smu_wm_range_sets *clock_ranges)
2464 {
2465 	struct smu_context *smu = handle;
2466 
2467 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2468 		return -EOPNOTSUPP;
2469 
2470 	if (smu->disable_watermark)
2471 		return 0;
2472 
2473 	return smu_set_watermarks_table(smu, clock_ranges);
2474 }
2475 
2476 int smu_set_ac_dc(struct smu_context *smu)
2477 {
2478 	int ret = 0;
2479 
2480 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2481 		return -EOPNOTSUPP;
2482 
2483 	/* controlled by firmware */
2484 	if (smu->dc_controlled_by_gpio)
2485 		return 0;
2486 
2487 	ret = smu_set_power_source(smu,
2488 				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2489 				   SMU_POWER_SOURCE_DC);
2490 	if (ret)
2491 		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2492 		       smu->adev->pm.ac_power ? "AC" : "DC");
2493 
2494 	return ret;
2495 }
2496 
2497 const struct amd_ip_funcs smu_ip_funcs = {
2498 	.name = "smu",
2499 	.early_init = smu_early_init,
2500 	.late_init = smu_late_init,
2501 	.sw_init = smu_sw_init,
2502 	.sw_fini = smu_sw_fini,
2503 	.hw_init = smu_hw_init,
2504 	.hw_fini = smu_hw_fini,
2505 	.late_fini = smu_late_fini,
2506 	.suspend = smu_suspend,
2507 	.resume = smu_resume,
2508 	.is_idle = NULL,
2509 	.check_soft_reset = NULL,
2510 	.wait_for_idle = NULL,
2511 	.soft_reset = NULL,
2512 	.set_clockgating_state = smu_set_clockgating_state,
2513 	.set_powergating_state = smu_set_powergating_state,
2514 };
2515 
2516 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2517 	.type = AMD_IP_BLOCK_TYPE_SMC,
2518 	.major = 11,
2519 	.minor = 0,
2520 	.rev = 0,
2521 	.funcs = &smu_ip_funcs,
2522 };
2523 
2524 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2525 	.type = AMD_IP_BLOCK_TYPE_SMC,
2526 	.major = 12,
2527 	.minor = 0,
2528 	.rev = 0,
2529 	.funcs = &smu_ip_funcs,
2530 };
2531 
2532 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2533 	.type = AMD_IP_BLOCK_TYPE_SMC,
2534 	.major = 13,
2535 	.minor = 0,
2536 	.rev = 0,
2537 	.funcs = &smu_ip_funcs,
2538 };
2539 
2540 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2541 	.type = AMD_IP_BLOCK_TYPE_SMC,
2542 	.major = 14,
2543 	.minor = 0,
2544 	.rev = 0,
2545 	.funcs = &smu_ip_funcs,
2546 };
2547 
2548 static int smu_load_microcode(void *handle)
2549 {
2550 	struct smu_context *smu = handle;
2551 	struct amdgpu_device *adev = smu->adev;
2552 	int ret = 0;
2553 
2554 	if (!smu->pm_enabled)
2555 		return -EOPNOTSUPP;
2556 
2557 	/* This path should only be used for non-PSP firmware loading */
2558 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2559 		return 0;
2560 
2561 	if (smu->ppt_funcs->load_microcode) {
2562 		ret = smu->ppt_funcs->load_microcode(smu);
2563 		if (ret) {
2564 			dev_err(adev->dev, "Load microcode failed\n");
2565 			return ret;
2566 		}
2567 	}
2568 
2569 	if (smu->ppt_funcs->check_fw_status) {
2570 		ret = smu->ppt_funcs->check_fw_status(smu);
2571 		if (ret) {
2572 			dev_err(adev->dev, "SMC is not ready\n");
2573 			return ret;
2574 		}
2575 	}
2576 
2577 	return ret;
2578 }
2579 
2580 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2581 {
2582 	int ret = 0;
2583 
2584 	if (smu->ppt_funcs->set_gfx_cgpg)
2585 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2586 
2587 	return ret;
2588 }
2589 
2590 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2591 {
2592 	struct smu_context *smu = handle;
2593 	int ret = 0;
2594 
2595 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2596 		return -EOPNOTSUPP;
2597 
2598 	if (!smu->ppt_funcs->set_fan_speed_rpm)
2599 		return -EOPNOTSUPP;
2600 
2601 	if (speed == U32_MAX)
2602 		return -EINVAL;
2603 
2604 	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2605 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2606 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2607 		smu->user_dpm_profile.fan_speed_rpm = speed;
2608 
2609 		/* Override the custom PWM setting as the two cannot co-exist */
2610 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2611 		smu->user_dpm_profile.fan_speed_pwm = 0;
2612 	}
2613 
2614 	return ret;
2615 }
2616 
2617 /**
2618  * smu_get_power_limit - Request one of the SMU Power Limits
2619  *
2620  * @handle: pointer to smu context
2621  * @limit: requested limit is written back to this variable
2622  * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2623  * @pp_power_type: &pp_power_type selecting the type of power limit
2624  *
2625  * Return: 0 on success, <0 on error
2626  */
2627 int smu_get_power_limit(void *handle,
2628 			uint32_t *limit,
2629 			enum pp_power_limit_level pp_limit_level,
2630 			enum pp_power_type pp_power_type)
2631 {
2632 	struct smu_context *smu = handle;
2633 	struct amdgpu_device *adev = smu->adev;
2634 	enum smu_ppt_limit_level limit_level;
2635 	uint32_t limit_type;
2636 	int ret = 0;
2637 
2638 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2639 		return -EOPNOTSUPP;
2640 
2641 	switch (pp_power_type) {
2642 	case PP_PWR_TYPE_SUSTAINED:
2643 		limit_type = SMU_DEFAULT_PPT_LIMIT;
2644 		break;
2645 	case PP_PWR_TYPE_FAST:
2646 		limit_type = SMU_FAST_PPT_LIMIT;
2647 		break;
2648 	default:
2649 		return -EOPNOTSUPP;
2650 	}
2651 
2652 	switch (pp_limit_level) {
2653 	case PP_PWR_LIMIT_CURRENT:
2654 		limit_level = SMU_PPT_LIMIT_CURRENT;
2655 		break;
2656 	case PP_PWR_LIMIT_DEFAULT:
2657 		limit_level = SMU_PPT_LIMIT_DEFAULT;
2658 		break;
2659 	case PP_PWR_LIMIT_MAX:
2660 		limit_level = SMU_PPT_LIMIT_MAX;
2661 		break;
2662 	case PP_PWR_LIMIT_MIN:
2663 		limit_level = SMU_PPT_LIMIT_MIN;
2664 		break;
2665 	default:
2666 		return -EOPNOTSUPP;
2667 	}
2668 
2669 	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2670 		if (smu->ppt_funcs->get_ppt_limit)
2671 			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2672 	} else {
2673 		switch (limit_level) {
2674 		case SMU_PPT_LIMIT_CURRENT:
2675 			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2676 			case IP_VERSION(13, 0, 2):
2677 			case IP_VERSION(13, 0, 6):
2678 			case IP_VERSION(11, 0, 7):
2679 			case IP_VERSION(11, 0, 11):
2680 			case IP_VERSION(11, 0, 12):
2681 			case IP_VERSION(11, 0, 13):
2682 				ret = smu_get_asic_power_limits(smu,
2683 								&smu->current_power_limit,
2684 								NULL, NULL, NULL);
2685 				break;
2686 			default:
2687 				break;
2688 			}
2689 			*limit = smu->current_power_limit;
2690 			break;
2691 		case SMU_PPT_LIMIT_DEFAULT:
2692 			*limit = smu->default_power_limit;
2693 			break;
2694 		case SMU_PPT_LIMIT_MAX:
2695 			*limit = smu->max_power_limit;
2696 			break;
2697 		case SMU_PPT_LIMIT_MIN:
2698 			*limit = smu->min_power_limit;
2699 			break;
2700 		default:
2701 			return -EINVAL;
2702 		}
2703 	}
2704 
2705 	return ret;
2706 }
2707 
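/*
 * @limit packs the PPT limit type into bits 31:24 and the limit value
 * into bits 23:0; a zero value re-applies the current power limit. As a
 * hypothetical example, a caller requesting a fast PPT limit of 220 would
 * pass ((SMU_FAST_PPT_LIMIT << 24) | 220).
 */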
2708 static int smu_set_power_limit(void *handle, uint32_t limit)
2709 {
2710 	struct smu_context *smu = handle;
2711 	uint32_t limit_type = limit >> 24;
2712 	int ret = 0;
2713 
2714 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2715 		return -EOPNOTSUPP;
2716 
2717 	limit &= (1 << 24) - 1;
2718 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2719 		if (smu->ppt_funcs->set_power_limit)
2720 			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2721 
2722 	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2723 		dev_err(smu->adev->dev,
2724 			"New power limit (%d) is out of range [%d,%d]\n",
2725 			limit, smu->min_power_limit, smu->max_power_limit);
2726 		return -EINVAL;
2727 	}
2728 
2729 	if (!limit)
2730 		limit = smu->current_power_limit;
2731 
2732 	if (smu->ppt_funcs->set_power_limit) {
2733 		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2734 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2735 			smu->user_dpm_profile.power_limit = limit;
2736 	}
2737 
2738 	return ret;
2739 }
2740 
2741 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2742 {
2743 	int ret = 0;
2744 
2745 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2746 		return -EOPNOTSUPP;
2747 
2748 	if (smu->ppt_funcs->print_clk_levels)
2749 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2750 
2751 	return ret;
2752 }
2753 
2754 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2755 {
2756 	enum smu_clk_type clk_type;
2757 
2758 	switch (type) {
2759 	case PP_SCLK:
2760 		clk_type = SMU_SCLK; break;
2761 	case PP_MCLK:
2762 		clk_type = SMU_MCLK; break;
2763 	case PP_PCIE:
2764 		clk_type = SMU_PCIE; break;
2765 	case PP_SOCCLK:
2766 		clk_type = SMU_SOCCLK; break;
2767 	case PP_FCLK:
2768 		clk_type = SMU_FCLK; break;
2769 	case PP_DCEFCLK:
2770 		clk_type = SMU_DCEFCLK; break;
2771 	case PP_VCLK:
2772 		clk_type = SMU_VCLK; break;
2773 	case PP_VCLK1:
2774 		clk_type = SMU_VCLK1; break;
2775 	case PP_DCLK:
2776 		clk_type = SMU_DCLK; break;
2777 	case PP_DCLK1:
2778 		clk_type = SMU_DCLK1; break;
2779 	case OD_SCLK:
2780 		clk_type = SMU_OD_SCLK; break;
2781 	case OD_MCLK:
2782 		clk_type = SMU_OD_MCLK; break;
2783 	case OD_VDDC_CURVE:
2784 		clk_type = SMU_OD_VDDC_CURVE; break;
2785 	case OD_RANGE:
2786 		clk_type = SMU_OD_RANGE; break;
2787 	case OD_VDDGFX_OFFSET:
2788 		clk_type = SMU_OD_VDDGFX_OFFSET; break;
2789 	case OD_CCLK:
2790 		clk_type = SMU_OD_CCLK; break;
2791 	case OD_FAN_CURVE:
2792 		clk_type = SMU_OD_FAN_CURVE; break;
2793 	case OD_ACOUSTIC_LIMIT:
2794 		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2795 	case OD_ACOUSTIC_TARGET:
2796 		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2797 	case OD_FAN_TARGET_TEMPERATURE:
2798 		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2799 	case OD_FAN_MINIMUM_PWM:
2800 		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2801 	default:
2802 		clk_type = SMU_CLK_COUNT; break;
2803 	}
2804 
2805 	return clk_type;
2806 }
2807 
2808 static int smu_print_ppclk_levels(void *handle,
2809 				  enum pp_clock_type type,
2810 				  char *buf)
2811 {
2812 	struct smu_context *smu = handle;
2813 	enum smu_clk_type clk_type;
2814 
2815 	clk_type = smu_convert_to_smuclk(type);
2816 	if (clk_type == SMU_CLK_COUNT)
2817 		return -EINVAL;
2818 
2819 	return smu_print_smuclk_levels(smu, clk_type, buf);
2820 }
2821 
2822 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2823 {
2824 	struct smu_context *smu = handle;
2825 	enum smu_clk_type clk_type;
2826 
2827 	clk_type = smu_convert_to_smuclk(type);
2828 	if (clk_type == SMU_CLK_COUNT)
2829 		return -EINVAL;
2830 
2831 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2832 		return -EOPNOTSUPP;
2833 
2834 	if (!smu->ppt_funcs->emit_clk_levels)
2835 		return -ENOENT;
2836 
2837 	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2839 }
2840 
2841 static int smu_od_edit_dpm_table(void *handle,
2842 				 enum PP_OD_DPM_TABLE_COMMAND type,
2843 				 long *input, uint32_t size)
2844 {
2845 	struct smu_context *smu = handle;
2846 	int ret = 0;
2847 
2848 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2849 		return -EOPNOTSUPP;
2850 
2851 	if (smu->ppt_funcs->od_edit_dpm_table)
2852 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2854 
2855 	return ret;
2856 }
2857 
2858 static int smu_read_sensor(void *handle,
2859 			   int sensor,
2860 			   void *data,
2861 			   int *size_arg)
2862 {
2863 	struct smu_context *smu = handle;
2864 	struct smu_umd_pstate_table *pstate_table =
2865 				&smu->pstate_table;
2866 	int ret = 0;
2867 	uint32_t *size, size_val;
2868 
2869 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2870 		return -EOPNOTSUPP;
2871 
2872 	if (!data || !size_arg)
2873 		return -EINVAL;
2874 
2875 	size_val = *size_arg;
2876 	size = &size_val;
2877 
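	/*
	 * Try the asic-specific handler first; fall through to the generic
	 * pstate/feature sensors below only when it cannot service the request.
	 */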
2878 	if (smu->ppt_funcs->read_sensor &&
2879 	    !smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2880 		goto unlock;
2881 
2882 	switch (sensor) {
2883 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2884 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2885 		*size = 4;
2886 		break;
2887 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2888 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2889 		*size = 4;
2890 		break;
2891 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2892 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2893 		*size = 4;
2894 		break;
2895 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2896 		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2897 		*size = 4;
2898 		break;
2899 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2900 		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2901 		*size = 8;
2902 		break;
2903 	case AMDGPU_PP_SENSOR_UVD_POWER:
2904 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2905 		*size = 4;
2906 		break;
2907 	case AMDGPU_PP_SENSOR_VCE_POWER:
2908 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2909 		*size = 4;
2910 		break;
2911 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2912 		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2913 		*size = 4;
2914 		break;
2915 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2916 		*(uint32_t *)data = 0;
2917 		*size = 4;
2918 		break;
2919 	default:
2920 		*size = 0;
2921 		ret = -EOPNOTSUPP;
2922 		break;
2923 	}
2924 
2925 unlock:
2926 	/* assign the (possibly updated) uint32_t size back to the caller's int */
2927 	*size_arg = size_val;
2928 
2929 	return ret;
2930 }
2931 
2932 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
2933 {
2934 	int ret = -EOPNOTSUPP;
2935 	struct smu_context *smu = handle;
2936 
2937 	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2938 		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2939 
2940 	return ret;
2941 }
2942 
2943 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
2944 {
2945 	int ret = -EOPNOTSUPP;
2946 	struct smu_context *smu = handle;
2947 
2948 	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2949 		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2950 
2951 	return ret;
2952 }
2953 
2954 static int smu_get_power_profile_mode(void *handle, char *buf)
2955 {
2956 	struct smu_context *smu = handle;
2957 
2958 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2959 	    !smu->ppt_funcs->get_power_profile_mode)
2960 		return -EOPNOTSUPP;
2961 	if (!buf)
2962 		return -EINVAL;
2963 
2964 	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
2965 }
2966 
2967 static int smu_set_power_profile_mode(void *handle,
2968 				      long *param,
2969 				      uint32_t param_size)
2970 {
2971 	struct smu_context *smu = handle;
2972 
2973 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2974 	    !smu->ppt_funcs->set_power_profile_mode)
2975 		return -EOPNOTSUPP;
2976 
2977 	return smu_bump_power_profile_mode(smu, param, param_size);
2978 }
2979 
2980 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
2981 {
2982 	struct smu_context *smu = handle;
2983 
2984 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2985 		return -EOPNOTSUPP;
2986 
2987 	if (!smu->ppt_funcs->get_fan_control_mode)
2988 		return -EOPNOTSUPP;
2989 
2990 	if (!fan_mode)
2991 		return -EINVAL;
2992 
2993 	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
2994 
2995 	return 0;
2996 }
2997 
2998 static int smu_set_fan_control_mode(void *handle, u32 value)
2999 {
3000 	struct smu_context *smu = handle;
3001 	int ret = 0;
3002 
3003 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3004 		return -EOPNOTSUPP;
3005 
3006 	if (!smu->ppt_funcs->set_fan_control_mode)
3007 		return -EOPNOTSUPP;
3008 
3009 	if (value == U32_MAX)
3010 		return -EINVAL;
3011 
3012 	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3013 	if (ret)
3014 		goto out;
3015 
3016 	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3017 		smu->user_dpm_profile.fan_mode = value;
3018 
3019 		/* reset user dpm fan speed */
3020 		if (value != AMD_FAN_CTRL_MANUAL) {
3021 			smu->user_dpm_profile.fan_speed_pwm = 0;
3022 			smu->user_dpm_profile.fan_speed_rpm = 0;
3023 			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3024 		}
3025 	}
3026 
3027 out:
3028 	return ret;
3029 }
3030 
3031 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3032 {
3033 	struct smu_context *smu = handle;
3034 	int ret = 0;
3035 
3036 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3037 		return -EOPNOTSUPP;
3038 
3039 	if (!smu->ppt_funcs->get_fan_speed_pwm)
3040 		return -EOPNOTSUPP;
3041 
3042 	if (!speed)
3043 		return -EINVAL;
3044 
3045 	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3046 
3047 	return ret;
3048 }
3049 
3050 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3051 {
3052 	struct smu_context *smu = handle;
3053 	int ret = 0;
3054 
3055 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3056 		return -EOPNOTSUPP;
3057 
3058 	if (!smu->ppt_funcs->set_fan_speed_pwm)
3059 		return -EOPNOTSUPP;
3060 
3061 	if (speed == U32_MAX)
3062 		return -EINVAL;
3063 
3064 	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3065 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3066 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3067 		smu->user_dpm_profile.fan_speed_pwm = speed;
3068 
3069 		/* Override the custom RPM setting as the two cannot co-exist */
3070 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3071 		smu->user_dpm_profile.fan_speed_rpm = 0;
3072 	}
3073 
3074 	return ret;
3075 }
3076 
3077 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3078 {
3079 	struct smu_context *smu = handle;
3080 	int ret = 0;
3081 
3082 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3083 		return -EOPNOTSUPP;
3084 
3085 	if (!smu->ppt_funcs->get_fan_speed_rpm)
3086 		return -EOPNOTSUPP;
3087 
3088 	if (!speed)
3089 		return -EINVAL;
3090 
3091 	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3092 
3093 	return ret;
3094 }
3095 
3096 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3097 {
3098 	struct smu_context *smu = handle;
3099 
3100 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3101 		return -EOPNOTSUPP;
3102 
3103 	return smu_set_min_dcef_deep_sleep(smu, clk);
3104 }
3105 
3106 static int smu_get_clock_by_type_with_latency(void *handle,
3107 					      enum amd_pp_clock_type type,
3108 					      struct pp_clock_levels_with_latency *clocks)
3109 {
3110 	struct smu_context *smu = handle;
3111 	enum smu_clk_type clk_type;
3112 	int ret = 0;
3113 
3114 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3115 		return -EOPNOTSUPP;
3116 
3117 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3118 		switch (type) {
3119 		case amd_pp_sys_clock:
3120 			clk_type = SMU_GFXCLK;
3121 			break;
3122 		case amd_pp_mem_clock:
3123 			clk_type = SMU_MCLK;
3124 			break;
3125 		case amd_pp_dcef_clock:
3126 			clk_type = SMU_DCEFCLK;
3127 			break;
3128 		case amd_pp_disp_clock:
3129 			clk_type = SMU_DISPCLK;
3130 			break;
3131 		default:
3132 			dev_err(smu->adev->dev, "Invalid clock type!\n");
3133 			return -EINVAL;
3134 		}
3135 
3136 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3137 	}
3138 
3139 	return ret;
3140 }
3141 
3142 static int smu_display_clock_voltage_request(void *handle,
3143 					     struct pp_display_clock_request *clock_req)
3144 {
3145 	struct smu_context *smu = handle;
3146 	int ret = 0;
3147 
3148 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3149 		return -EOPNOTSUPP;
3150 
3151 	if (smu->ppt_funcs->display_clock_voltage_request)
3152 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3153 
3154 	return ret;
3155 }
3156 
3158 static int smu_display_disable_memory_clock_switch(void *handle,
3159 						   bool disable_memory_clock_switch)
3160 {
3161 	struct smu_context *smu = handle;
3162 	int ret = -EINVAL;
3163 
3164 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3165 		return -EOPNOTSUPP;
3166 
3167 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
3168 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3169 
3170 	return ret;
3171 }
3172 
3173 static int smu_set_xgmi_pstate(void *handle,
3174 			       uint32_t pstate)
3175 {
3176 	struct smu_context *smu = handle;
3177 	int ret = 0;
3178 
3179 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3180 		return -EOPNOTSUPP;
3181 
3182 	if (smu->ppt_funcs->set_xgmi_pstate)
3183 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3184 
3185 	if (ret)
3186 		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3187 
3188 	return ret;
3189 }
3190 
3191 static bool smu_get_baco_capability(void *handle)
3192 {
3193 	struct smu_context *smu = handle;
3194 
3195 	if (!smu->pm_enabled)
3196 		return false;
3197 
3198 	if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
3199 		return false;
3200 
3201 	return smu->ppt_funcs->baco_is_support(smu);
3202 }
3203 
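/* @state: 1 requests BACO entry, 0 requests BACO exit. */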
3204 static int smu_baco_set_state(void *handle, int state)
3205 {
3206 	struct smu_context *smu = handle;
3207 	int ret = 0;
3208 
3209 	if (!smu->pm_enabled)
3210 		return -EOPNOTSUPP;
3211 
3212 	if (state == 0) {
3213 		if (smu->ppt_funcs->baco_exit)
3214 			ret = smu->ppt_funcs->baco_exit(smu);
3215 	} else if (state == 1) {
3216 		if (smu->ppt_funcs->baco_enter)
3217 			ret = smu->ppt_funcs->baco_enter(smu);
3218 	} else {
3219 		return -EINVAL;
3220 	}
3221 
3222 	if (ret)
3223 		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3224 				(state)?"enter":"exit");
3225 
3226 	return ret;
3227 }
3228 
3229 bool smu_mode1_reset_is_support(struct smu_context *smu)
3230 {
3231 	bool ret = false;
3232 
3233 	if (!smu->pm_enabled)
3234 		return false;
3235 
3236 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3237 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3238 
3239 	return ret;
3240 }
3241 
3242 bool smu_mode2_reset_is_support(struct smu_context *smu)
3243 {
3244 	bool ret = false;
3245 
3246 	if (!smu->pm_enabled)
3247 		return false;
3248 
3249 	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3250 		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3251 
3252 	return ret;
3253 }
3254 
3255 int smu_mode1_reset(struct smu_context *smu)
3256 {
3257 	int ret = 0;
3258 
3259 	if (!smu->pm_enabled)
3260 		return -EOPNOTSUPP;
3261 
3262 	if (smu->ppt_funcs->mode1_reset)
3263 		ret = smu->ppt_funcs->mode1_reset(smu);
3264 
3265 	return ret;
3266 }
3267 
3268 static int smu_mode2_reset(void *handle)
3269 {
3270 	struct smu_context *smu = handle;
3271 	int ret = 0;
3272 
3273 	if (!smu->pm_enabled)
3274 		return -EOPNOTSUPP;
3275 
3276 	if (smu->ppt_funcs->mode2_reset)
3277 		ret = smu->ppt_funcs->mode2_reset(smu);
3278 
3279 	if (ret)
3280 		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3281 
3282 	return ret;
3283 }
3284 
3285 static int smu_enable_gfx_features(void *handle)
3286 {
3287 	struct smu_context *smu = handle;
3288 	int ret = 0;
3289 
3290 	if (!smu->pm_enabled)
3291 		return -EOPNOTSUPP;
3292 
3293 	if (smu->ppt_funcs->enable_gfx_features)
3294 		ret = smu->ppt_funcs->enable_gfx_features(smu);
3295 
3296 	if (ret)
3297 		dev_err(smu->adev->dev, "enable gfx features failed!\n");
3298 
3299 	return ret;
3300 }
3301 
3302 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3303 						struct pp_smu_nv_clock_table *max_clocks)
3304 {
3305 	struct smu_context *smu = handle;
3306 	int ret = 0;
3307 
3308 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3309 		return -EOPNOTSUPP;
3310 
3311 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3312 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3313 
3314 	return ret;
3315 }
3316 
3317 static int smu_get_uclk_dpm_states(void *handle,
3318 				   unsigned int *clock_values_in_khz,
3319 				   unsigned int *num_states)
3320 {
3321 	struct smu_context *smu = handle;
3322 	int ret = 0;
3323 
3324 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3325 		return -EOPNOTSUPP;
3326 
3327 	if (smu->ppt_funcs->get_uclk_dpm_states)
3328 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3329 
3330 	return ret;
3331 }
3332 
3333 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3334 {
3335 	struct smu_context *smu = handle;
3336 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3337 
3338 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3339 		return -EOPNOTSUPP;
3340 
3341 	if (smu->ppt_funcs->get_current_power_state)
3342 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
3343 
3344 	return pm_state;
3345 }
3346 
3347 static int smu_get_dpm_clock_table(void *handle,
3348 				   struct dpm_clocks *clock_table)
3349 {
3350 	struct smu_context *smu = handle;
3351 	int ret = 0;
3352 
3353 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3354 		return -EOPNOTSUPP;
3355 
3356 	if (smu->ppt_funcs->get_dpm_clock_table)
3357 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3358 
3359 	return ret;
3360 }
3361 
3362 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3363 {
3364 	struct smu_context *smu = handle;
3365 
3366 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3367 		return -EOPNOTSUPP;
3368 
3369 	if (!smu->ppt_funcs->get_gpu_metrics)
3370 		return -EOPNOTSUPP;
3371 
3372 	return smu->ppt_funcs->get_gpu_metrics(smu, table);
3373 }
3374 
3375 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3376 				      size_t size)
3377 {
3378 	struct smu_context *smu = handle;
3379 
3380 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3381 		return -EOPNOTSUPP;
3382 
3383 	if (!smu->ppt_funcs->get_pm_metrics)
3384 		return -EOPNOTSUPP;
3385 
3386 	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3387 }
3388 
3389 static int smu_enable_mgpu_fan_boost(void *handle)
3390 {
3391 	struct smu_context *smu = handle;
3392 	int ret = 0;
3393 
3394 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3395 		return -EOPNOTSUPP;
3396 
3397 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
3398 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3399 
3400 	return ret;
3401 }
3402 
3403 static int smu_gfx_state_change_set(void *handle,
3404 				    uint32_t state)
3405 {
3406 	struct smu_context *smu = handle;
3407 	int ret = 0;
3408 
3409 	if (smu->ppt_funcs->gfx_state_change_set)
3410 		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3411 
3412 	return ret;
3413 }
3414 
3415 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3416 {
3417 	int ret = 0;
3418 
3419 	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3420 		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3421 
3422 	return ret;
3423 }
3424 
3425 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3426 {
3427 	int ret = -EOPNOTSUPP;
3428 
3429 	if (smu->ppt_funcs &&
3430 		smu->ppt_funcs->get_ecc_info)
3431 		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3432 
3433 	return ret;
3435 }
3436 
3437 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3438 {
3439 	struct smu_context *smu = handle;
3440 	struct smu_table_context *smu_table = &smu->smu_table;
3441 	struct smu_table *memory_pool = &smu_table->memory_pool;
3442 
3443 	if (!addr || !size)
3444 		return -EINVAL;
3445 
3446 	*addr = NULL;
3447 	*size = 0;
3448 	if (memory_pool->bo) {
3449 		*addr = memory_pool->cpu_addr;
3450 		*size = memory_pool->size;
3451 	}
3452 
3453 	return 0;
3454 }
3455 
3456 int smu_set_xgmi_plpd_mode(struct smu_context *smu,
3457 			   enum pp_xgmi_plpd_mode mode)
3458 {
3459 	int ret = -EOPNOTSUPP;
3460 
3461 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3462 		return ret;
3463 
3464 	/* PLPD mode switching is not supported when the current mode is NONE */
3465 	if (smu->plpd_mode == XGMI_PLPD_NONE)
3466 		return ret;
3467 
3468 	if (smu->plpd_mode == mode)
3469 		return 0;
3470 
3471 	if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
3472 		ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
3473 
3474 	if (!ret)
3475 		smu->plpd_mode = mode;
3476 
3477 	return ret;
3478 }
3479 
3480 static const struct amd_pm_funcs swsmu_pm_funcs = {
3481 	/* export for sysfs */
3482 	.set_fan_control_mode    = smu_set_fan_control_mode,
3483 	.get_fan_control_mode    = smu_get_fan_control_mode,
3484 	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
3485 	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
3486 	.force_clock_level       = smu_force_ppclk_levels,
3487 	.print_clock_levels      = smu_print_ppclk_levels,
3488 	.emit_clock_levels       = smu_emit_ppclk_levels,
3489 	.force_performance_level = smu_force_performance_level,
3490 	.read_sensor             = smu_read_sensor,
3491 	.get_apu_thermal_limit       = smu_get_apu_thermal_limit,
3492 	.set_apu_thermal_limit       = smu_set_apu_thermal_limit,
3493 	.get_performance_level   = smu_get_performance_level,
3494 	.get_current_power_state = smu_get_current_power_state,
3495 	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
3496 	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
3497 	.get_pp_num_states       = smu_get_power_num_states,
3498 	.get_pp_table            = smu_sys_get_pp_table,
3499 	.set_pp_table            = smu_sys_set_pp_table,
3500 	.switch_power_profile    = smu_switch_power_profile,
3501 	/* export to amdgpu */
3502 	.dispatch_tasks          = smu_handle_dpm_task,
3503 	.load_firmware           = smu_load_microcode,
3504 	.set_powergating_by_smu  = smu_dpm_set_power_gate,
3505 	.set_power_limit         = smu_set_power_limit,
3506 	.get_power_limit         = smu_get_power_limit,
3507 	.get_power_profile_mode  = smu_get_power_profile_mode,
3508 	.set_power_profile_mode  = smu_set_power_profile_mode,
3509 	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
3510 	.set_mp1_state           = smu_set_mp1_state,
3511 	.gfx_state_change_set    = smu_gfx_state_change_set,
3512 	/* export to DC */
3513 	.get_sclk                         = smu_get_sclk,
3514 	.get_mclk                         = smu_get_mclk,
3515 	.display_configuration_change     = smu_display_configuration_change,
3516 	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
3517 	.display_clock_voltage_request    = smu_display_clock_voltage_request,
3518 	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
3519 	.set_active_display_count         = smu_set_display_count,
3520 	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
3521 	.get_asic_baco_capability         = smu_get_baco_capability,
3522 	.set_asic_baco_state              = smu_baco_set_state,
3523 	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
3524 	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
3525 	.asic_reset_mode_2                = smu_mode2_reset,
3526 	.asic_reset_enable_gfx_features   = smu_enable_gfx_features,
3527 	.set_df_cstate                    = smu_set_df_cstate,
3528 	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
3529 	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
3530 	.get_pm_metrics                   = smu_sys_get_pm_metrics,
3531 	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
3532 	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3533 	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
3534 	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
3535 	.get_dpm_clock_table              = smu_get_dpm_clock_table,
3536 	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
3537 };
3538 
3539 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3540 		       uint64_t event_arg)
3541 {
3542 	int ret = -EINVAL;
3543 
3544 	if (smu->ppt_funcs->wait_for_event)
3545 		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3546 
3547 	return ret;
3548 }
3549 
3550 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3551 {
3553 	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3554 		return -EOPNOTSUPP;
3555 
3556 	/* Confirm the allocated buffer is of the correct size */
3557 	if (size != smu->stb_context.stb_buf_size)
3558 		return -EINVAL;
3559 
3560 	/*
3561 	 * No need to take the smu mutex as we access STB directly through MMIO
3562 	 * rather than going through the SMU messaging route (for now at least).
3563 	 * For register access, rely on the implementation's internal locking.
3564 	 */
3565 	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3566 }
3567 
3568 #if defined(CONFIG_DEBUG_FS)
3569 
3570 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3571 {
3572 	struct amdgpu_device *adev = filp->f_inode->i_private;
3573 	struct smu_context *smu = adev->powerplay.pp_handle;
3574 	unsigned char *buf;
3575 	int r;
3576 
3577 	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3578 	if (!buf)
3579 		return -ENOMEM;
3580 
3581 	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3582 	if (r)
3583 		goto out;
3584 
3585 	filp->private_data = buf;
3586 
3587 	return 0;
3588 
3589 out:
3590 	kvfree(buf);
3591 	return r;
3592 }
3593 
3594 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3595 				loff_t *pos)
3596 {
3597 	struct amdgpu_device *adev = filp->f_inode->i_private;
3598 	struct smu_context *smu = adev->powerplay.pp_handle;
3599 
3601 	if (!filp->private_data)
3602 		return -EINVAL;
3603 
3604 	return simple_read_from_buffer(buf,
3605 				       size,
3606 				       pos, filp->private_data,
3607 				       smu->stb_context.stb_buf_size);
3608 }
3609 
3610 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3611 {
3612 	kvfree(filp->private_data);
3613 	filp->private_data = NULL;
3614 
3615 	return 0;
3616 }
3617 
3618 /*
3619  * We have to define not only the read method but also open and
3620  * release, because .read reads up to PAGE_SIZE of data each time
3621  * and so is invoked multiple times.
3622  * We allocate the STB buffer in .open and release it
3623  * in .release.
3624  */
3625 static const struct file_operations smu_stb_debugfs_fops = {
3626 	.owner = THIS_MODULE,
3627 	.open = smu_stb_debugfs_open,
3628 	.read = smu_stb_debugfs_read,
3629 	.release = smu_stb_debugfs_release,
3630 	.llseek = default_llseek,
3631 };
3632 
3633 #endif
3634 
3635 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3636 {
3637 #if defined(CONFIG_DEBUG_FS)
3638 
3639 	struct smu_context *smu = adev->powerplay.pp_handle;
3640 
3641 	if (!smu || !smu->stb_context.stb_buf_size)
3642 		return;
3643 
3644 	debugfs_create_file_size("amdgpu_smu_stb_dump",
3645 			    S_IRUSR,
3646 			    adev_to_drm(adev)->primary->debugfs_root,
3647 			    adev,
3648 			    &smu_stb_debugfs_fops,
3649 			    smu->stb_context.stb_buf_size);
3650 #endif
3651 }
3652 
3653 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3654 {
3655 	int ret = 0;
3656 
3657 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3658 		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3659 
3660 	return ret;
3661 }
3662 
3663 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3664 {
3665 	int ret = 0;
3666 
3667 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3668 		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3669 
3670 	return ret;
3671 }
3672