xref: /linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c (revision a460513ed4b6994bfeb7bd86f72853140bc1ac12)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "smu8_smumgr.h"
#include "cz_ppsmc.h"
#include "smu_ucode_xfer_cz.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "smumgr.h"

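/* Round a byte count up to the next multiple of 32, e.g. SIZE_ALIGN_32(100) == 128. */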
#define SIZE_ALIGN_32(x)    (((x) + 31) / 32 * 32)

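/* Ucode images handed to the SMU loader; iterated by smu8_smu_populate_firmware_entries(). */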
static const enum smu8_scratch_entry firmware_list[] = {
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};

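/* Read the SMU's reply value back from the message-argument register. */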
static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return 0;

	return cgs_read_register(hwmgr->device,
					mmSMU_MP1_SRBM2P_ARG_0);
}

/* Send a message to the SMC, and wait for its response. */
static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					    uint16_t msg, uint32_t parameter)
{
	int result = 0;
	ktime_t t_start;
	s64 elapsed_us;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
	if (result != 0) {
		/* Read the last message to SMU, to report actual cause */
		uint32_t val = cgs_read_register(hwmgr->device,
						 mmSMU_MP1_SRBM2P_MSG_0);
		pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
			__func__, msg, val);
		return result;
	}
	t_start = ktime_get();

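	/*
	 * Handshake with the SMU: load the argument, clear the response
	 * register, post the message, then wait for the SMU to write a
	 * non-zero response.
	 */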
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);

	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);

	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);

	elapsed_us = ktime_us_delta(ktime_get(), t_start);

	WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
			__func__, msg, parameter, elapsed_us);

	return result;
}

static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
}

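/*
 * Select an SMC SRAM dword via the MP0PUB indirect index register; a
 * following write to mmMP0PUB_IND_DATA_0 then lands at that offset.
 */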
static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
				     uint32_t smc_address, uint32_t limit)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	if (0 != (3 & smc_address)) {
		pr_err("SMC address must be 4-byte aligned\n");
		return -EINVAL;
	}

	if (limit <= (smc_address + 3)) {
		pr_err("SMC address beyond the SMC RAM area\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
				SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}

static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
		uint32_t smc_address, uint32_t value, uint32_t limit)
{
	int result;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	result = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
	if (!result)
		cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);

	return result;
}

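/*
 * Poll UcodeLoadStatus in the SMU firmware header until every bit in
 * @firmware is set, or give up after hwmgr->usec_timeout microseconds.
 */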
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
				   uint32_t firmware)
{
	int i;
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		if (firmware ==
			(cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
			break;
		udelay(1);
	}

	if (i >= hwmgr->usec_timeout) {
		pr_err("SMU firmware load check failed.\n");
		return -EINVAL;
	}

	return 0;
}

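/*
 * Halt both MEC micro engines and program the CPC instruction-cache base
 * registers with the GPU address of the CP_MEC ucode image.
 */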
static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
	uint32_t reg_data;
	uint32_t tmp;
	int ret = 0;
	struct cgs_firmware_info info = {0};

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	ret = cgs_get_firmware_info(hwmgr->device,
						CGS_UCODE_ID_CP_MEC, &info);

	if (ret)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = cgs_read_register(hwmgr->device,
					mmCP_MEC_CNTL);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);

	tmp = cgs_read_register(hwmgr->device,
					mmCP_CPC_IC_BASE_CNTL);

	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);

	reg_data = lower_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}

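/*
 * Map a scratch-entry enum onto the task argument expected by the SMU.
 * Stoney has a single SDMA engine and a single MEC jump table, so SDMA1
 * and CP_MEC_JT2 alias to their first instance there.
 */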
static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_SDMA0;
		else
			ret = UCODE_ID_SDMA1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		if (hwmgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_CP_MEC_JT1;
		else
			ret = UCODE_ID_CP_MEC_JT2;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}

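/* Translate an SMU8 ucode id into the CGS ucode id used by cgs_get_firmware_info(). */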
static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
{
	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SDMA0:
		result = CGS_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = CGS_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = CGS_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = CGS_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = CGS_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC_JT1:
		result = CGS_UCODE_ID_CP_MEC_JT1;
		break;
	case UCODE_ID_CP_MEC_JT2:
		result = CGS_UCODE_ID_CP_MEC_JT2;
		break;
	case UCODE_ID_RLC_G:
		result = CGS_UCODE_ID_RLC_G;
		break;
	default:
		break;
	}

	return result;
}

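/*
 * Append one save/load/initialize task to the TOC that targets a scratch
 * buffer carved out of the driver-owned SMU buffer. The task's 'next'
 * field chains it to the following TOC entry unless it is the last one.
 */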
static int smu8_smu_populate_single_scratch_task(
			struct pp_hwmgr *hwmgr,
			enum smu8_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
		if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->scratch_buffer[i].data_size;

	if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct smu8_ih_meta_data *pIHReg_restore =
		     (struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}

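/*
 * Append a TASK_TYPE_UCODE_LOAD entry to the TOC that points at one of the
 * firmware images collected in smu8_smu_populate_firmware_entries().
 */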
static int smu8_smu_populate_single_ucode_load_task(
					struct pp_hwmgr *hwmgr,
					enum smu8_scratch_entry fw_enum,
					bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	for (i = 0; i < smu8_smu->driver_buffer_length; i++)
		if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->driver_buffer[i].data_size;

	return 0;
}

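/*
 * The helpers below build the SMU's table of contents. Each one records the
 * TOC index at which its task chain starts (toc_entry_aram, JobList slots,
 * etc.) so the chain can later be run via PPSMC_MSG_ExecuteJob.
 */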
static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
{
	int i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = (uint8_t)IGNORE_JOB;

	return 0;
}

static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
	smu8_smu_populate_single_scratch_task(hwmgr,
				    SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				    TASK_TYPE_UCODE_SAVE, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				    SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				    TASK_TYPE_UCODE_SAVE, true);

	return 0;
}

static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);

	if (hwmgr->chip_id == CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	else
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);

	/* populate scratch */
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}

static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}

static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);

	return 0;
}

static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}

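/*
 * Build the complete TOC: reset the task counter, mark every job-list slot
 * as unused, then append each of the task chains above.
 */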
static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_used_count = 0;
	smu8_smu_initialize_toc_empty_job_list(hwmgr);
	smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
	smu8_smu_construct_toc_for_power_profiling(hwmgr);
	smu8_smu_construct_toc_for_bootup(hwmgr);
	smu8_smu_construct_toc_for_clock_table(hwmgr);

	return 0;
}

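/*
 * Query CGS for each image in firmware_list and record its GPU address and
 * size in driver_buffer[]; images that are not available are simply skipped.
 */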
static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t firmware_type;
	uint32_t i;
	int ret;
	enum cgs_ucode_id ucode_id;
	struct cgs_firmware_info info = {0};

	smu8_smu->driver_buffer_length = 0;

	for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {

		firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
					firmware_list[i]);

		ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);

		ret = cgs_get_firmware_info(hwmgr->device,
							ucode_id, &info);

		if (ret == 0) {
			smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;

			smu8_smu->driver_buffer[i].data_size = info.image_size;

			smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
			smu8_smu->driver_buffer_length++;
		}
	}

	return 0;
}

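/*
 * Carve a sub-allocation for one scratch entry out of the shared SMU buffer
 * object, bumping the used-bytes cursor by the 32-byte-aligned size.
 */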
static int smu8_smu_populate_single_scratch_entry(
				struct pp_hwmgr *hwmgr,
				enum smu8_scratch_entry scratch_type,
				uint32_t ulsize_byte,
				struct smu8_buffer_entry *entry)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);

	entry->data_size = ulsize_byte;
	entry->kaddr = (char *) smu8_smu->smu_buffer.kaddr +
				smu8_smu->smu_buffer_used_bytes;
	entry->mc_addr = smu8_smu->smu_buffer.mc_addr + smu8_smu->smu_buffer_used_bytes;
	entry->firmware_ID = scratch_type;

	smu8_smu->smu_buffer_used_bytes += ulsize_aligned;

	return 0;
}

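/*
 * Hand the clock-table scratch buffer to the SMU and ask it to copy its
 * clock table into that buffer in DRAM, then return the kernel mapping to
 * the caller.
 */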
static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	unsigned long i;

	for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
		if (smu8_smu->scratch_buffer[i].firmware_ID
			== SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;
	}

	*table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetClkTableAddrHi,
				upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetClkTableAddrLo,
				lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_clock_table,
				NULL);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);

	return 0;
}

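/*
 * Mirror of smu8_download_pptable_settings(): point the SMU at the same
 * scratch buffer and ask it to pull the (possibly modified) clock table
 * back from DRAM into SMU memory.
 */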
static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	unsigned long i;

	for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
		if (smu8_smu->scratch_buffer[i].firmware_ID
				== SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;
	}

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetClkTableAddrHi,
				upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetClkTableAddrLo,
				lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_clock_table,
				NULL);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);

	return 0;
}

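/*
 * Full firmware bring-up: build the TOC, clear UcodeLoadStatus in the SMU
 * firmware header, hand the TOC address to the SMU, run the ARAM, power
 * profiling and initialize jobs, then verify that every expected ucode bit
 * is set before loading the MEC image.
 */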
static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t smc_address;
	uint32_t fw_to_check = 0;
	int ret;

	amdgpu_ucode_init_bo(hwmgr->adev);

	smu8_smu_populate_firmware_entries(hwmgr);

	smu8_smu_construct_toc(hwmgr);

	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrHi,
					upper_32_bits(smu8_smu->toc_buffer.mc_addr),
					NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrLo,
					lower_32_bits(smu8_smu->toc_buffer.mc_addr),
					NULL);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_aram,
					NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_power_profiling_index,
				NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_initialize_index,
					NULL);

	fw_to_check = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_JT1_MASK |
			UCODE_ID_CP_MEC_JT2_MASK;

	if (hwmgr->chip_id == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
	if (ret) {
		pr_err("SMU firmware load failed\n");
		return ret;
	}

	ret = smu8_load_mec_firmware(hwmgr);
	if (ret) {
		pr_err("MEC firmware load failed\n");
		return ret;
	}

	return 0;
}

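/*
 * Read the SMU firmware version out of the firmware header (for logging and
 * adev->pm.fw_version) and then kick off the firmware load sequence.
 */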
static int smu8_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, Version);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	adev = hwmgr->adev;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
	pr_info("smu version %02d.%02d.%02d\n",
		((hwmgr->smu_version >> 16) & 0xFF),
		((hwmgr->smu_version >> 8) & 0xFF),
		(hwmgr->smu_version & 0xFF));
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	return smu8_request_smu_load_fw(hwmgr);
}

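/*
 * Allocate the backend state plus two VRAM buffer objects: one page for the
 * TOC and one shared buffer that is carved into the RLC save/restore, power
 * profiling and clock-table scratch entries.
 */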
static int smu8_smu_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct smu8_smumgr *smu8_smu;

	smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
	if (smu8_smu == NULL)
		return -ENOMEM;

	hwmgr->smu_backend = smu8_smu;

	smu8_smu->toc_buffer.data_size = 4096;
	smu8_smu->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
				smu8_smu->toc_buffer.data_size,
				PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM,
				&smu8_smu->toc_buffer.handle,
				&smu8_smu->toc_buffer.mc_addr,
				&smu8_smu->toc_buffer.kaddr);
	if (ret)
		goto err2;

	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
				smu8_smu->smu_buffer.data_size,
				PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM,
				&smu8_smu->smu_buffer.handle,
				&smu8_smu->smu_buffer.mc_addr,
				&smu8_smu->smu_buffer.kaddr);
	if (ret)
		goto err1;

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
		UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when populating firmware entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
		UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when populating firmware entry.\n");
		goto err0;
	}
	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
		UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when populating firmware entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
		sizeof(struct SMU8_MultimediaPowerLogData),
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when populating firmware entry.\n");
		goto err0;
	}

	if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
		SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
		sizeof(struct SMU8_Fusion_ClkTable),
		&smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
		pr_err("Error when populating firmware entry.\n");
		goto err0;
	}

	return 0;

err0:
	amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
				&smu8_smu->smu_buffer.mc_addr,
				&smu8_smu->smu_buffer.kaddr);
err1:
	amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
				&smu8_smu->toc_buffer.mc_addr,
				&smu8_smu->toc_buffer.kaddr);
err2:
	kfree(smu8_smu);
	return -EINVAL;
}

static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	smu8_smu = hwmgr->smu_backend;
	if (smu8_smu) {
		amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
					&smu8_smu->toc_buffer.mc_addr,
					&smu8_smu->toc_buffer.kaddr);
		amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
					&smu8_smu->smu_buffer.mc_addr,
					&smu8_smu->smu_buffer.kaddr);
		kfree(smu8_smu);
	}

	return 0;
}

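/*
 * Ask the SMU for its feature scoreboard (PPSMC_MSG_GetFeatureStatus) and
 * test whether the requested feature bit is currently set.
 */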
static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
				unsigned long check_feature)
{
	int result;
	uint32_t features;

	result = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetFeatureStatus,
				0,
				&features);
	if (result == 0) {
		if (features & check_feature)
			return true;
	}

	return false;
}

static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
		return true;
	return false;
}

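/*
 * Callback table registered with the smumgr layer for SMU8-based APUs
 * (Carrizo/Stoney); the smum_*() wrappers dispatch through these entries.
 */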
const struct pp_smumgr_func smu8_smu_funcs = {
	.name = "smu8_smu",
	.smu_init = smu8_smu_init,
	.smu_fini = smu8_smu_fini,
	.start_smu = smu8_start_smu,
	.check_fw_load_finish = smu8_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = smu8_get_argument,
	.send_msg_to_smc = smu8_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
	.download_pptable_settings = smu8_download_pptable_settings,
	.upload_pptable_settings = smu8_upload_pptable_settings,
	.is_dpm_running = smu8_is_dpm_running,
};