xref: /linux/drivers/accel/habanalabs/common/firmware_if.c (revision bf5802238dc181b1f7375d358af1d01cd72d1c11)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 #include <linux/habanalabs/hl_boot_if.h>
10 
11 #include <linux/firmware.h>
12 #include <linux/crc32.h>
13 #include <linux/slab.h>
14 #include <linux/ctype.h>
15 #include <linux/vmalloc.h>
16 
17 #include <trace/events/habanalabs.h>
18 
19 #define FW_FILE_MAX_SIZE		0x1400000 /* maximum size of 20MB */
20 
21 static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
22 	[COMMS_NOOP] = __stringify(COMMS_NOOP),
23 	[COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
24 	[COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
25 	[COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
26 	[COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
27 	[COMMS_EXEC] = __stringify(COMMS_EXEC),
28 	[COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
29 	[COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
30 	[COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
31 	[COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
32 };
33 
34 static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
35 	[COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
36 	[COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
37 	[COMMS_STS_OK] = __stringify(COMMS_STS_OK),
38 	[COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
39 	[COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
40 	[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
41 };
42 
43 static char *extract_fw_ver_from_str(const char *fw_str)
44 {
45 	char *str, *fw_ver, *whitespace;
46 	u32 ver_offset;
47 
48 	fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
49 	if (!fw_ver)
50 		return NULL;
51 
52 	str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
53 	if (!str)
54 		goto free_fw_ver;
55 
56 	/* Skip the fw- part */
57 	str += 3;
58 	ver_offset = str - fw_str;
59 
60 	/* Copy until the next whitespace */
61 	whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
62 	if (!whitespace)
63 		goto free_fw_ver;
64 
65 	strscpy(fw_ver, str, whitespace - str + 1);
66 
67 	return fw_ver;
68 
69 free_fw_ver:
70 	kfree(fw_ver);
71 	return NULL;
72 }
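
/*
 * Illustrative example for the helper above (the input string is
 * hypothetical, shown only to demonstrate the parsing rule): given a string
 * containing "... fw-42.0.1 ...", the helper skips the "fw-" prefix and
 * copies up to the following whitespace, so the returned buffer holds
 * "42.0.1". The caller owns the returned buffer and must kfree() it:
 *
 *	char *ver = extract_fw_ver_from_str(fw_str);
 *
 *	if (ver) {
 *		dev_info(hdev->dev, "version %s\n", ver);
 *		kfree(ver);
 *	}
 */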
73 
74 /**
75  * extract_u32_until_given_char() - given a string of the format "<u32><char>*", extract the u32.
76  * @str: the given string
77  * @ver_num: the pointer to the extracted u32 to be returned to the caller.
78  * @given_char: the given char at the end of the u32 in the string
79  *
80  * Return: Upon success, return a pointer to the given_char in the string. Upon failure, return NULL
81  */
82 static char *extract_u32_until_given_char(char *str, u32 *ver_num, char given_char)
83 {
84 	char num_str[8] = {}, *ch;
85 
86 	ch = strchrnul(str, given_char);
87 	if (*ch == '\0' || ch == str || ch - str >= sizeof(num_str))
88 		return NULL;
89 
90 	memcpy(num_str, str, ch - str);
91 	if (kstrtou32(num_str, 10, ver_num))
92 		return NULL;
93 	return ch;
94 }
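
/*
 * Worked example for the helper above (the input value is illustrative):
 * with str pointing at "42.0.1-sec-3" and given_char == '.', num_str becomes
 * "42", *ver_num is set to 42 and the returned pointer points at the first
 * '.', so the caller can continue parsing right after it:
 *
 *	char ver[] = "42.0.1-sec-3";
 *	u32 major;
 *	char *next;
 *
 *	next = extract_u32_until_given_char(ver, &major, '.');
 */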
95 
96 /**
97  * hl_get_sw_major_minor_subminor() - extract the FW's SW version major, minor, sub-minor
98  *				      from the version string
99  * @hdev: pointer to the hl_device
100  * @fw_str: the FW's version string
101  *
102  * The extracted version is set in the hdev fields: fw_sw_{major/minor/sub_minor}_ver.
103  *
104  * fw_str is expected to have one of two possible formats, examples:
105  * 1) 'Preboot version hl-gaudi2-1.9.0-fw-42.0.1-sec-3'
106  * 2) 'Preboot version hl-gaudi2-1.9.0-rc-fw-42.0.1-sec-3'
107  * In both examples the SW major, minor and subminor are 1, 9 and 0 respectively.
108  *
109  * Return: 0 for success or a negative error code for failure.
110  */
111 static int hl_get_sw_major_minor_subminor(struct hl_device *hdev, const char *fw_str)
112 {
113 	char *end, *start;
114 
115 	end = strnstr(fw_str, "-rc-", VERSION_MAX_LEN);
116 	if (end == fw_str)
117 		return -EINVAL;
118 
119 	if (!end)
120 		end = strnstr(fw_str, "-fw-", VERSION_MAX_LEN);
121 
122 	if (end == fw_str)
123 		return -EINVAL;
124 
125 	if (!end)
126 		return -EINVAL;
127 
128 	for (start = end - 1; start != fw_str; start--) {
129 		if (*start == '-')
130 			break;
131 	}
132 
133 	if (start == fw_str)
134 		return -EINVAL;
135 
136 	/* start/end each point to the starting and ending hyphen of the SW version, e.g. -1.9.0- */
137 	start++;
138 	start = extract_u32_until_given_char(start, &hdev->fw_sw_major_ver, '.');
139 	if (!start)
140 		goto err_zero_ver;
141 
142 	start++;
143 	start = extract_u32_until_given_char(start, &hdev->fw_sw_minor_ver, '.');
144 	if (!start)
145 		goto err_zero_ver;
146 
147 	start++;
148 	start = extract_u32_until_given_char(start, &hdev->fw_sw_sub_minor_ver, '-');
149 	if (!start)
150 		goto err_zero_ver;
151 
152 	return 0;
153 
154 err_zero_ver:
155 	hdev->fw_sw_major_ver = 0;
156 	hdev->fw_sw_minor_ver = 0;
157 	hdev->fw_sw_sub_minor_ver = 0;
158 	return -EINVAL;
159 }
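
/*
 * Parse trace for the function above, using the first example string from
 * its kernel-doc ("... hl-gaudi2-1.9.0-fw-42.0.1-sec-3"): 'end' lands on the
 * '-' that precedes "fw-", 'start' scans backwards to the '-' that precedes
 * "1.9.0" and is advanced past it, and the three
 * extract_u32_until_given_char() calls then yield:
 *
 *	hdev->fw_sw_major_ver = 1	(stops at the first '.')
 *	hdev->fw_sw_minor_ver = 9	(stops at the second '.')
 *	hdev->fw_sw_sub_minor_ver = 0	(stops at the '-' before "fw-")
 */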
160 
161 /**
162  * hl_get_preboot_major_minor() - extract the FW's version major, minor from the version string.
163  * @hdev: pointer to the hl_device
164  * @preboot_ver: the FW's version string
165  *
166  * preboot_ver is expected to be in the format <major>.<minor>.<sub minor>*, e.g.: 42.0.1-sec-3
167  * The extracted version is set in the hdev fields: fw_inner_{major/minor}_ver.
168  *
169  * Return: 0 on success, negative error code for failure.
170  */
171 static int hl_get_preboot_major_minor(struct hl_device *hdev, char *preboot_ver)
172 {
173 	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_major_ver, '.');
174 	if (!preboot_ver) {
175 		dev_err(hdev->dev, "Error parsing preboot major version\n");
176 		goto err_zero_ver;
177 	}
178 
179 	preboot_ver++;
180 
181 	preboot_ver = extract_u32_until_given_char(preboot_ver, &hdev->fw_inner_minor_ver, '.');
182 	if (!preboot_ver) {
183 		dev_err(hdev->dev, "Error parsing preboot minor version\n");
184 		goto err_zero_ver;
185 	}
186 	return 0;
187 
188 err_zero_ver:
189 	hdev->fw_inner_major_ver = 0;
190 	hdev->fw_inner_minor_ver = 0;
191 	return -EINVAL;
192 }
193 
194 static int hl_request_fw(struct hl_device *hdev,
195 				const struct firmware **firmware_p,
196 				const char *fw_name)
197 {
198 	size_t fw_size;
199 	int rc;
200 
201 	rc = request_firmware(firmware_p, fw_name, hdev->dev);
202 	if (rc) {
203 		dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
204 				fw_name, rc);
205 		goto out;
206 	}
207 
208 	fw_size = (*firmware_p)->size;
209 	if ((fw_size % 4) != 0) {
210 		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
211 				fw_name, fw_size);
212 		rc = -EINVAL;
213 		goto release_fw;
214 	}
215 
216 	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
217 
218 	if (fw_size > FW_FILE_MAX_SIZE) {
219 		dev_err(hdev->dev,
220 			"FW file size %zu exceeds maximum of %u bytes\n",
221 			fw_size, FW_FILE_MAX_SIZE);
222 		rc = -EINVAL;
223 		goto release_fw;
224 	}
225 
226 	return 0;
227 
228 release_fw:
229 	release_firmware(*firmware_p);
230 out:
231 	return rc;
232 }
233 
234 /**
235  * hl_release_firmware() - release FW
236  *
237  * @fw: fw descriptor
238  *
239  * note: this inline function was added to serve as a counterpart to the
240  *       hl_request_fw function.
241  */
242 static inline void hl_release_firmware(const struct firmware *fw)
243 {
244 	release_firmware(fw);
245 }
246 
247 /**
248  * hl_fw_copy_fw_to_device() - copy FW to device
249  *
250  * @hdev: pointer to hl_device structure.
251  * @fw: fw descriptor
252  * @dst: IO memory mapped address space to copy firmware to
253  * @src_offset: offset in src FW to copy from
254  * @size: amount of bytes to copy (0 to copy the whole binary)
255  *
256  * actual copy of FW binary data to device, shared by static and dynamic loaders
257  */
258 static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
259 				const struct firmware *fw, void __iomem *dst,
260 				u32 src_offset, u32 size)
261 {
262 	const void *fw_data;
263 
264 	/* size 0 indicates to copy the whole file */
265 	/* size 0 indicates copying the whole file */
266 		size = fw->size;
267 
268 	if (src_offset + size > fw->size) {
269 		dev_err(hdev->dev,
270 			"size to copy(%u) and offset(%u) are invalid\n",
271 			size, src_offset);
272 		return -EINVAL;
273 	}
274 
275 	fw_data = (const void *) fw->data;
276 
277 	memcpy_toio(dst, fw_data + src_offset, size);
278 	return 0;
279 }
280 
281 /**
282  * hl_fw_copy_msg_to_device() - copy message to device
283  *
284  * @hdev: pointer to hl_device structure.
285  * @msg: message
286  * @dst: IO memory mapped address space to copy firmware to
287  * @dst: IO memory mapped address space to copy the message to
288  * @size: amount of bytes to copy (0 to copy the whole message)
289  *
290  * actual copy of message data to device.
291  */
292 static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
293 		struct lkd_msg_comms *msg, void __iomem *dst,
294 		u32 src_offset, u32 size)
295 {
296 	void *msg_data;
297 
298 	/* size 0 indicates to copy the whole file */
299 	/* size 0 indicates copying the whole message */
300 		size = sizeof(struct lkd_msg_comms);
301 
302 	if (src_offset + size > sizeof(struct lkd_msg_comms)) {
303 		dev_err(hdev->dev,
304 			"size to copy(%u) and offset(%u) are invalid\n",
305 			size, src_offset);
306 		return -EINVAL;
307 	}
308 
309 	msg_data = (void *) msg;
310 
311 	memcpy_toio(dst, msg_data + src_offset, size);
312 
313 	return 0;
314 }
315 
316 /**
317  * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
318  *
319  * @hdev: pointer to hl_device structure.
320  * @fw_name: the firmware image name
321  * @dst: IO memory mapped address space to copy firmware to
322  * @src_offset: offset in src FW to copy from
323  * @size: amount of bytes to copy (0 to copy the whole binary)
324  *
325  * Copy fw code from firmware file to device memory.
326  *
327  * Return: 0 on success, non-zero for failure.
328  */
329 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
330 				void __iomem *dst, u32 src_offset, u32 size)
331 {
332 	const struct firmware *fw;
333 	int rc;
334 
335 	rc = hl_request_fw(hdev, &fw, fw_name);
336 	if (rc)
337 		return rc;
338 
339 	rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
340 
341 	hl_release_firmware(fw);
342 	return rc;
343 }
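
/*
 * Illustrative (hypothetical) usage of the helper above; the firmware file
 * name, BAR id and offset are placeholders and do not refer to a real ASIC
 * configuration. Passing size 0 copies the whole file:
 *
 *	void __iomem *dst = hdev->pcie_bar[sram_bar_id] + fw_offset;
 *	int rc;
 *
 *	rc = hl_fw_load_fw_to_device(hdev, "habanalabs/some_asic/boot_fit.itb",
 *					dst, 0, 0);
 */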
344 
345 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
346 {
347 	struct cpucp_packet pkt = {};
348 
349 	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
350 	pkt.value = cpu_to_le64(value);
351 
352 	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
353 }
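
/*
 * Sketch of how the 'ctl' field of struct cpucp_packet is used in both
 * directions, based on the masks used in hl_fw_send_cpu_message() below: the
 * driver places the opcode in the CPUCP_PKT_CTL_OPCODE_MASK bits before
 * submission, and on completion the F/W returns its error code in the
 * CPUCP_PKT_CTL_RC_MASK bits of the same field:
 *
 *	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
 *	...
 *	tmp = le32_to_cpu(pkt.ctl);
 *	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
 */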
354 
355 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
356 				u16 len, u32 timeout, u64 *result)
357 {
358 	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
359 	struct asic_fixed_properties *prop = &hdev->asic_prop;
360 	struct cpucp_packet *pkt;
361 	dma_addr_t pkt_dma_addr;
362 	struct hl_bd *sent_bd;
363 	u32 tmp, expected_ack_val, pi, opcode;
364 	int rc;
365 
366 	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
367 	if (!pkt) {
368 		dev_err(hdev->dev,
369 			"Failed to allocate DMA memory for packet to CPU\n");
370 		return -ENOMEM;
371 	}
372 
373 	memcpy(pkt, msg, len);
374 
375 	mutex_lock(&hdev->send_cpu_message_lock);
376 
377 	/* CPU-CP messages can be sent during soft-reset */
378 	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
379 		rc = 0;
380 		goto out;
381 	}
382 
383 	if (hdev->device_cpu_disabled) {
384 		rc = -EIO;
385 		goto out;
386 	}
387 
388 	/* set fence to a non valid value */
389 	/* set fence to an invalid value */
390 	pi = queue->pi;
391 
392 	/*
393 	 * The CPU queue is a synchronous queue with an effective depth of
394 	 * a single entry (although it is allocated with room for multiple
395 	 * entries). We lock on it using 'send_cpu_message_lock' which
396 	 * serializes accesses to the CPU queue. This means we don't need to
397 	 * lock the access to the entire H/W queues module when submitting a
398 	 * JOB to the CPU queue.
399 	 */
400 	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
401 
402 	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
403 		expected_ack_val = queue->pi;
404 	else
405 		expected_ack_val = CPUCP_PACKET_FENCE_VAL;
406 
407 	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
408 				(tmp == expected_ack_val), 1000,
409 				timeout, true);
410 
411 	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
412 
413 	if (rc == -ETIMEDOUT) {
414 		/* If the F/W performed a reset just before we sent it a packet, we will get a
415 		 * timeout. This is expected behavior, hence no need for an error message.
416 		 */
417 		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
418 			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
419 					tmp);
420 		else
421 			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
422 		hdev->device_cpu_disabled = true;
423 		goto out;
424 	}
425 
426 	tmp = le32_to_cpu(pkt->ctl);
427 
428 	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
429 	if (rc) {
430 		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
431 
432 		if (!prop->supports_advanced_cpucp_rc) {
433 			dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
434 			rc = -EIO;
435 			goto scrub_descriptor;
436 		}
437 
438 		switch (rc) {
439 		case cpucp_packet_invalid:
440 			dev_err(hdev->dev,
441 				"CPU packet %d is not supported by F/W\n", opcode);
442 			break;
443 		case cpucp_packet_fault:
444 			dev_err(hdev->dev,
445 				"F/W failed processing CPU packet %d\n", opcode);
446 			break;
447 		case cpucp_packet_invalid_pkt:
448 			dev_dbg(hdev->dev,
449 				"CPU packet %d is not supported by F/W\n", opcode);
450 			break;
451 		case cpucp_packet_invalid_params:
452 			dev_err(hdev->dev,
453 				"F/W reports invalid parameters for CPU packet %d\n", opcode);
454 			break;
455 
456 		default:
457 			dev_err(hdev->dev,
458 				"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
459 		}
460 
461 		/* propagate the return code from the f/w to the callers who want to check it */
462 		if (result)
463 			*result = rc;
464 
465 		rc = -EIO;
466 
467 	} else if (result) {
468 		*result = le64_to_cpu(pkt->result);
469 	}
470 
471 scrub_descriptor:
472 	/* Scrub previous buffer descriptor 'ctl' field which contains the
473 	 * previous PI value written during packet submission.
474 	 * We must do this or else F/W can read an old value upon queue wraparound.
475 	 */
476 	sent_bd = queue->kernel_address;
477 	sent_bd += hl_pi_2_offset(pi);
478 	sent_bd->ctl = cpu_to_le32(UINT_MAX);
479 
480 out:
481 	mutex_unlock(&hdev->send_cpu_message_lock);
482 
483 	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
484 
485 	return rc;
486 }
487 
488 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
489 {
490 	struct cpucp_packet pkt;
491 	u64 result;
492 	int rc;
493 
494 	memset(&pkt, 0, sizeof(pkt));
495 
496 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
497 				CPUCP_PKT_CTL_OPCODE_SHIFT);
498 	pkt.value = cpu_to_le64(event_type);
499 
500 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
501 						0, &result);
502 
503 	if (rc)
504 		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
505 
506 	return rc;
507 }
508 
509 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
510 		size_t irq_arr_size)
511 {
512 	struct cpucp_unmask_irq_arr_packet *pkt;
513 	size_t total_pkt_size;
514 	u64 result;
515 	int rc;
516 
517 	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
518 			irq_arr_size;
519 
520 	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
521 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
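	/*
	 * The rounding above aligns the size up to the next multiple of 8,
	 * e.g. (illustrative values) a total_pkt_size of 0x2a becomes 0x30
	 * while 0x28 is left unchanged.
	 */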
522 
523 	/* total_pkt_size is cast to u16 later on */
524 	if (total_pkt_size > USHRT_MAX) {
525 		dev_err(hdev->dev, "too many elements in IRQ array\n");
526 		return -EINVAL;
527 	}
528 
529 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
530 	if (!pkt)
531 		return -ENOMEM;
532 
533 	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
534 	memcpy(&pkt->irqs, irq_arr, irq_arr_size);
535 
536 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
537 						CPUCP_PKT_CTL_OPCODE_SHIFT);
538 
539 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
540 						total_pkt_size, 0, &result);
541 
542 	if (rc)
543 		dev_err(hdev->dev, "failed to unmask IRQ array\n");
544 
545 	kfree(pkt);
546 
547 	return rc;
548 }
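
/*
 * Illustrative (hypothetical) caller sketch for the helper above; the event
 * IDs are placeholders. Note that the size argument is in bytes - the helper
 * divides it by sizeof(irq_arr[0]) to fill pkt->length:
 *
 *	static const u32 irqs[] = { 0x10, 0x11, 0x12 };
 *
 *	rc = hl_fw_unmask_irq_arr(hdev, irqs, sizeof(irqs));
 */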
549 
550 int hl_fw_test_cpu_queue(struct hl_device *hdev)
551 {
552 	struct cpucp_packet test_pkt = {};
553 	u64 result;
554 	int rc;
555 
556 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
557 					CPUCP_PKT_CTL_OPCODE_SHIFT);
558 	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
559 
560 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
561 						sizeof(test_pkt), 0, &result);
562 
563 	if (!rc) {
564 		if (result != CPUCP_PACKET_FENCE_VAL)
565 			dev_err(hdev->dev,
566 				"CPU queue test failed (%#08llx)\n", result);
567 	} else {
568 		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
569 	}
570 
571 	return rc;
572 }
573 
574 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
575 						dma_addr_t *dma_handle)
576 {
577 	u64 kernel_addr;
578 
579 	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
580 
581 	*dma_handle = hdev->cpu_accessible_dma_address +
582 		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
583 
584 	return (void *) (uintptr_t) kernel_addr;
585 }
586 
587 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
588 					void *vaddr)
589 {
590 	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
591 			size);
592 }
593 
594 int hl_fw_send_soft_reset(struct hl_device *hdev)
595 {
596 	struct cpucp_packet pkt;
597 	int rc;
598 
599 	memset(&pkt, 0, sizeof(pkt));
600 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_SOFT_RESET << CPUCP_PKT_CTL_OPCODE_SHIFT);
601 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
602 	if (rc)
603 		dev_err(hdev->dev, "failed to send soft-reset msg (err = %d)\n", rc);
604 
605 	return rc;
606 }
607 
608 int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
609 {
610 	struct cpucp_packet pkt;
611 	int rc;
612 
613 	memset(&pkt, 0, sizeof(pkt));
614 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET <<	CPUCP_PKT_CTL_OPCODE_SHIFT);
615 	pkt.value = cpu_to_le64(open);
616 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
617 	if (rc)
618 		dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
619 
620 	return rc;
621 }
622 
623 int hl_fw_send_heartbeat(struct hl_device *hdev)
624 {
625 	struct cpucp_packet hb_pkt;
626 	u64 result;
627 	int rc;
628 
629 	memset(&hb_pkt, 0, sizeof(hb_pkt));
630 	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
631 					CPUCP_PKT_CTL_OPCODE_SHIFT);
632 	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
633 
634 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
635 						sizeof(hb_pkt), 0, &result);
636 
637 	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
638 		return -EIO;
639 
640 	if (le32_to_cpu(hb_pkt.status_mask) &
641 					CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
642 		dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
643 		rc = -EIO;
644 	}
645 
646 	return rc;
647 }
648 
649 static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val, u32 sts_val)
650 {
651 	bool err_exists = false;
652 
653 	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
654 		return false;
655 
656 	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
657 		dev_err(hdev->dev, "Device boot error - DRAM initialization failed\n");
658 
659 	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
660 		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
661 
662 	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
663 		dev_err(hdev->dev, "Device boot error - Thermal Sensor initialization failed\n");
664 
665 	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
666 		if (hdev->bmc_enable) {
667 			dev_err(hdev->dev, "Device boot error - Skipped waiting for BMC\n");
668 		} else {
669 			dev_info(hdev->dev, "Device boot message - Skipped waiting for BMC\n");
670 			/* This is informational, so we don't want it to
671 			 * disable the device
672 			 */
673 			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
674 		}
675 	}
676 
677 	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
678 		dev_err(hdev->dev, "Device boot error - Serdes data from BMC not available\n");
679 
680 	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
681 		dev_err(hdev->dev, "Device boot error - NIC F/W initialization failed\n");
682 
683 	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY)
684 		dev_err(hdev->dev, "Device boot warning - security not ready\n");
685 
686 	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL)
687 		dev_err(hdev->dev, "Device boot error - security failure\n");
688 
689 	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL)
690 		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
691 
692 	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL)
693 		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
694 
695 	if (err_val & CPU_BOOT_ERR0_PLL_FAIL)
696 		dev_err(hdev->dev, "Device boot error - PLL failure\n");
697 
698 	if (err_val & CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL)
699 		dev_err(hdev->dev, "Device boot error - Failed to set threshold for temperature sensor\n");
700 
701 	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
702 		/* Ignore this bit, don't prevent driver loading */
703 		dev_dbg(hdev->dev, "device unusable status is set\n");
704 		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
705 	}
706 
707 	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL)
708 		dev_err(hdev->dev, "Device boot error - binning failure\n");
709 
710 	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
711 		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
712 
713 	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
714 		dev_err(hdev->dev, "Device boot warning - Skipped DRAM initialization\n");
715 
716 	if (err_val & CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
717 		dev_err(hdev->dev, "Device boot error - ARC memory scrub failed\n");
718 
719 	/* All warnings should go here in order not to reach the unknown error validation */
720 	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
721 		dev_err(hdev->dev, "Device boot error - EEPROM failure detected\n");
722 		err_exists = true;
723 	}
724 
725 	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL)
726 		dev_warn(hdev->dev, "Device boot warning - Failed to load preboot primary image\n");
727 
728 	if (err_val & CPU_BOOT_ERR0_TPM_FAIL)
729 		dev_warn(hdev->dev, "Device boot warning - TPM failure\n");
730 
731 	if (err_val & CPU_BOOT_ERR_FATAL_MASK)
732 		err_exists = true;
733 
734 	/* return error only if it's in the predefined mask */
735 	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
736 				lower_32_bits(hdev->boot_error_status_mask)))
737 		return true;
738 
739 	return false;
740 }
741 
742 /* placeholder for ERR1 as no errors are defined there yet */
743 static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
744 								u32 sts_val)
745 {
746 	/*
747 	 * Keep this variable to preserve the logic of the function.
748 	 * This way fewer modifications will be required when errors are
749 	 * added to DEV_ERR1.
750 	 */
751 	bool err_exists = false;
752 
753 	if (!(err_val & CPU_BOOT_ERR1_ENABLED))
754 		return false;
755 
756 	if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
757 		dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
758 
759 	if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
760 		dev_err(hdev->dev,
761 			"Device boot error - unknown ERR1 error 0x%08x\n",
762 								err_val);
763 		err_exists = true;
764 	}
765 
766 	/* return error only if it's in the predefined mask */
767 	if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
768 				upper_32_bits(hdev->boot_error_status_mask)))
769 		return true;
770 
771 	return false;
772 }
773 
774 static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
775 				u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
776 				u32 cpu_boot_dev_status1_reg)
777 {
778 	u32 err_val, status_val;
779 	bool err_exists = false;
780 
781 	/* Some of the firmware status codes are deprecated in newer f/w
782 	 * versions. In those versions, the errors are reported
783 	 * in different registers. Therefore, we need to check those
784 	 * registers and print the exact errors. Moreover, there
785 	 * may be multiple errors, so we need to report on each error
786 	 * separately. Some of the error codes might indicate a state
787 	 * that is not an error per se, but is an error in a production
788 	 * environment.
789 	 */
790 	err_val = RREG32(boot_err0_reg);
791 	status_val = RREG32(cpu_boot_dev_status0_reg);
792 	err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
793 
794 	err_val = RREG32(boot_err1_reg);
795 	status_val = RREG32(cpu_boot_dev_status1_reg);
796 	err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
797 
798 	if (err_exists)
799 		return -EIO;
800 
801 	return 0;
802 }
803 
804 int hl_fw_cpucp_info_get(struct hl_device *hdev,
805 				u32 sts_boot_dev_sts0_reg,
806 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
807 				u32 boot_err1_reg)
808 {
809 	struct asic_fixed_properties *prop = &hdev->asic_prop;
810 	struct cpucp_packet pkt = {};
811 	dma_addr_t cpucp_info_dma_addr;
812 	void *cpucp_info_cpu_addr;
813 	char *kernel_ver;
814 	u64 result;
815 	int rc;
816 
817 	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
818 								&cpucp_info_dma_addr);
819 	if (!cpucp_info_cpu_addr) {
820 		dev_err(hdev->dev,
821 			"Failed to allocate DMA memory for CPU-CP info packet\n");
822 		return -ENOMEM;
823 	}
824 
825 	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
826 
827 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
828 				CPUCP_PKT_CTL_OPCODE_SHIFT);
829 	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
830 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
831 
832 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
833 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
834 	if (rc) {
835 		dev_err(hdev->dev,
836 			"Failed to handle CPU-CP info pkt, error %d\n", rc);
837 		goto out;
838 	}
839 
840 	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
841 				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
842 	if (rc) {
843 		dev_err(hdev->dev, "Errors in device boot\n");
844 		goto out;
845 	}
846 
847 	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
848 			sizeof(prop->cpucp_info));
849 
850 	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
851 	if (rc) {
852 		dev_err(hdev->dev,
853 			"Failed to build hwmon channel info, error %d\n", rc);
854 		rc = -EFAULT;
855 		goto out;
856 	}
857 
858 	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
859 	if (kernel_ver) {
860 		dev_info(hdev->dev, "Linux version %s", kernel_ver);
861 		kfree(kernel_ver);
862 	}
863 
864 	/* assume EQ code doesn't need to check eqe index */
865 	hdev->event_queue.check_eqe_index = false;
866 
867 	/* Read FW application security bits again */
868 	if (prop->fw_cpu_boot_dev_sts0_valid) {
869 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
870 		if (prop->fw_app_cpu_boot_dev_sts0 &
871 				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
872 			hdev->event_queue.check_eqe_index = true;
873 	}
874 
875 	if (prop->fw_cpu_boot_dev_sts1_valid)
876 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
877 
878 out:
879 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
880 
881 	return rc;
882 }
883 
884 static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
885 {
886 	struct cpucp_array_data_packet *pkt;
887 	size_t total_pkt_size, data_size;
888 	u64 result;
889 	int rc;
890 
891 	/* skip sending this info for unsupported ASICs */
892 	if (!hdev->asic_funcs->get_msi_info)
893 		return 0;
894 
895 	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
896 	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
897 
898 	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
899 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
900 
901 	/* total_pkt_size is cast to u16 later on */
902 	if (total_pkt_size > USHRT_MAX) {
903 		dev_err(hdev->dev, "CPUCP array data is too big\n");
904 		return -EINVAL;
905 	}
906 
907 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
908 	if (!pkt)
909 		return -ENOMEM;
910 
911 	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
912 
913 	memset((void *) &pkt->data, 0xFF, data_size);
914 	hdev->asic_funcs->get_msi_info(pkt->data);
915 
916 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
917 						CPUCP_PKT_CTL_OPCODE_SHIFT);
918 
919 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
920 						total_pkt_size, 0, &result);
921 
922 	/*
923 	 * In case the packet result is invalid it means that the FW does not
924 	 * support this feature and will use default/hard-coded MSI values.
925 	 * There is no reason to stop the boot.
926 	 */
927 	if (rc && result == cpucp_packet_invalid)
928 		rc = 0;
929 
930 	if (rc)
931 		dev_err(hdev->dev, "failed to send CPUCP array data\n");
932 
933 	kfree(pkt);
934 
935 	return rc;
936 }
937 
938 int hl_fw_cpucp_handshake(struct hl_device *hdev,
939 				u32 sts_boot_dev_sts0_reg,
940 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
941 				u32 boot_err1_reg)
942 {
943 	int rc;
944 
945 	rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
946 					sts_boot_dev_sts1_reg, boot_err0_reg,
947 					boot_err1_reg);
948 	if (rc)
949 		return rc;
950 
951 	return hl_fw_send_msi_info_msg(hdev);
952 }
953 
954 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
955 {
956 	struct cpucp_packet pkt = {};
957 	void *eeprom_info_cpu_addr;
958 	dma_addr_t eeprom_info_dma_addr;
959 	u64 result;
960 	int rc;
961 
962 	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
963 									&eeprom_info_dma_addr);
964 	if (!eeprom_info_cpu_addr) {
965 		dev_err(hdev->dev,
966 			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
967 		return -ENOMEM;
968 	}
969 
970 	memset(eeprom_info_cpu_addr, 0, max_size);
971 
972 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
973 				CPUCP_PKT_CTL_OPCODE_SHIFT);
974 	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
975 	pkt.data_max_size = cpu_to_le32(max_size);
976 
977 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
978 			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
979 
980 	if (rc) {
981 		dev_err(hdev->dev,
982 			"Failed to handle CPU-CP EEPROM packet, error %d\n",
983 			rc);
984 		goto out;
985 	}
986 
987 	/* result contains the actual size */
988 	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
989 
990 out:
991 	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
992 
993 	return rc;
994 }
995 
996 int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
997 {
998 	struct cpucp_monitor_dump *mon_dump_cpu_addr;
999 	dma_addr_t mon_dump_dma_addr;
1000 	struct cpucp_packet pkt = {};
1001 	size_t data_size;
1002 	__le32 *src_ptr;
1003 	u32 *dst_ptr;
1004 	u64 result;
1005 	int i, rc;
1006 
1007 	data_size = sizeof(struct cpucp_monitor_dump);
1008 	mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
1009 	if (!mon_dump_cpu_addr) {
1010 		dev_err(hdev->dev,
1011 			"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
1012 		return -ENOMEM;
1013 	}
1014 
1015 	memset(mon_dump_cpu_addr, 0, data_size);
1016 
1017 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1018 	pkt.addr = cpu_to_le64(mon_dump_dma_addr);
1019 	pkt.data_max_size = cpu_to_le32(data_size);
1020 
1021 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1022 							HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
1023 	if (rc) {
1024 		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
1025 		goto out;
1026 	}
1027 
1028 	/* result contains the actual size */
1029 	src_ptr = (__le32 *) mon_dump_cpu_addr;
1030 	dst_ptr = data;
1031 	for (i = 0; i < (data_size / sizeof(u32)); i++) {
1032 		*dst_ptr = le32_to_cpu(*src_ptr);
1033 		src_ptr++;
1034 		dst_ptr++;
1035 	}
1036 
1037 out:
1038 	hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
1039 
1040 	return rc;
1041 }
1042 
1043 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
1044 		struct hl_info_pci_counters *counters)
1045 {
1046 	struct cpucp_packet pkt = {};
1047 	u64 result;
1048 	int rc;
1049 
1050 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1051 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1052 
1053 	/* Fetch PCI rx counter */
1054 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
1055 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1056 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1057 	if (rc) {
1058 		dev_err(hdev->dev,
1059 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1060 		return rc;
1061 	}
1062 	counters->rx_throughput = result;
1063 
1064 	memset(&pkt, 0, sizeof(pkt));
1065 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
1066 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1067 
1068 	/* Fetch PCI tx counter */
1069 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
1070 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1071 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1072 	if (rc) {
1073 		dev_err(hdev->dev,
1074 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1075 		return rc;
1076 	}
1077 	counters->tx_throughput = result;
1078 
1079 	/* Fetch PCI replay counter */
1080 	memset(&pkt, 0, sizeof(pkt));
1081 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
1082 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1083 
1084 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1085 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1086 	if (rc) {
1087 		dev_err(hdev->dev,
1088 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1089 		return rc;
1090 	}
1091 	counters->replay_cnt = (u32) result;
1092 
1093 	return rc;
1094 }
1095 
1096 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
1097 {
1098 	struct cpucp_packet pkt = {};
1099 	u64 result;
1100 	int rc;
1101 
1102 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
1103 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1104 
1105 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1106 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1107 	if (rc) {
1108 		dev_err(hdev->dev,
1109 			"Failed to handle CpuCP total energy pkt, error %d\n",
1110 				rc);
1111 		return rc;
1112 	}
1113 
1114 	*total_energy = result;
1115 
1116 	return rc;
1117 }
1118 
1119 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
1120 						enum pll_index *pll_index)
1121 {
1122 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1123 	u8 pll_byte, pll_bit_off;
1124 	bool dynamic_pll;
1125 	int fw_pll_idx;
1126 
1127 	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
1128 						CPU_BOOT_DEV_STS0_DYN_PLL_EN);
1129 
1130 	if (!dynamic_pll) {
1131 		/*
1132 		 * In case we are working with legacy FW (each ASIC has unique
1133 		 * PLL numbering), use the driver-based index as it is aligned
1134 		 * with the FW legacy numbering.
1135 		 */
1136 		*pll_index = input_pll_index;
1137 		return 0;
1138 	}
1139 
1140 	/* retrieve a FW compatible PLL index based on
1141 	 * ASIC specific user request
1142 	 */
1143 	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
1144 	if (fw_pll_idx < 0) {
1145 		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
1146 			input_pll_index, fw_pll_idx);
1147 		return -EINVAL;
1148 	}
1149 
1150 	/* PLL map is a u8 array */
1151 	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
1152 	pll_bit_off = fw_pll_idx & 0x7;
1153 
1154 	if (!(pll_byte & BIT(pll_bit_off))) {
1155 		dev_err(hdev->dev, "PLL index %d is not supported\n",
1156 			fw_pll_idx);
1157 		return -EINVAL;
1158 	}
1159 
1160 	*pll_index = fw_pll_idx;
1161 
1162 	return 0;
1163 }
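
/*
 * Worked example for the bitmap lookup above (the index value is
 * illustrative): for fw_pll_idx == 10, pll_byte is pll_map[10 >> 3], i.e.
 * pll_map[1], and pll_bit_off is 10 & 0x7 == 2, so the PLL is reported as
 * supported only if bit 2 of pll_map[1] is set.
 */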
1164 
1165 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
1166 		u16 *pll_freq_arr)
1167 {
1168 	struct cpucp_packet pkt;
1169 	enum pll_index used_pll_idx;
1170 	u64 result;
1171 	int rc;
1172 
1173 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
1174 	if (rc)
1175 		return rc;
1176 
1177 	memset(&pkt, 0, sizeof(pkt));
1178 
1179 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
1180 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1181 	pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
1182 
1183 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1184 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1185 	if (rc) {
1186 		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
1187 		return rc;
1188 	}
1189 
1190 	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
1191 	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
1192 	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
1193 	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
1194 
1195 	return 0;
1196 }
1197 
1198 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
1199 {
1200 	struct cpucp_packet pkt;
1201 	u64 result;
1202 	int rc;
1203 
1204 	memset(&pkt, 0, sizeof(pkt));
1205 
1206 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
1207 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1208 	pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
1209 
1210 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1211 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1212 	if (rc) {
1213 		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
1214 		return rc;
1215 	}
1216 
1217 	*power = result;
1218 
1219 	return rc;
1220 }
1221 
1222 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
1223 				struct cpucp_hbm_row_info *info)
1224 {
1225 	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
1226 	dma_addr_t cpucp_repl_rows_info_dma_addr;
1227 	struct cpucp_packet pkt = {};
1228 	u64 result;
1229 	int rc;
1230 
1231 	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
1232 							sizeof(struct cpucp_hbm_row_info),
1233 							&cpucp_repl_rows_info_dma_addr);
1234 	if (!cpucp_repl_rows_info_cpu_addr) {
1235 		dev_err(hdev->dev,
1236 			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
1241 
1242 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
1243 					CPUCP_PKT_CTL_OPCODE_SHIFT);
1244 	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
1245 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
1246 
1247 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1248 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1249 	if (rc) {
1250 		dev_err(hdev->dev,
1251 			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
1252 		goto out;
1253 	}
1254 
1255 	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
1256 
1257 out:
1258 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
1259 						cpucp_repl_rows_info_cpu_addr);
1260 
1261 	return rc;
1262 }
1263 
1264 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
1265 {
1266 	struct cpucp_packet pkt;
1267 	u64 result;
1268 	int rc;
1269 
1270 	memset(&pkt, 0, sizeof(pkt));
1271 
1272 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
1273 
1274 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
1275 	if (rc) {
1276 		dev_err(hdev->dev,
1277 				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
1278 		goto out;
1279 	}
1280 
1281 	*pend_rows_num = (u32) result;
1282 out:
1283 	return rc;
1284 }
1285 
1286 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
1287 {
1288 	struct cpucp_packet pkt;
1289 	int rc;
1290 
1291 	memset(&pkt, 0, sizeof(pkt));
1292 
1293 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1294 	pkt.value = cpu_to_le64(asid);
1295 
1296 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1297 						HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
1298 	if (rc)
1299 		dev_err(hdev->dev,
1300 			"Failed on ASID configuration request for engine core, error %d\n",
1301 			rc);
1302 
1303 	return rc;
1304 }
1305 
1306 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
1307 {
1308 	struct static_fw_load_mgr *static_loader =
1309 			&hdev->fw_loader.static_loader;
1310 	int rc;
1311 
1312 	if (hdev->asic_prop.dynamic_fw_load) {
1313 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1314 				COMMS_RST_DEV, 0, false,
1315 				hdev->fw_loader.cpu_timeout);
1316 		if (rc)
1317 			dev_err(hdev->dev, "Failed sending COMMS_RST_DEV\n");
1318 	} else {
1319 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
1320 	}
1321 }
1322 
1323 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
1324 {
1325 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1326 	u32 status, cpu_boot_status_reg, cpu_timeout;
1327 	struct static_fw_load_mgr *static_loader;
1328 	struct pre_fw_load_props *pre_fw_load;
1329 	int rc;
1330 
1331 	if (hdev->device_cpu_is_halted)
1332 		return;
1333 
1334 	/* Stop device CPU to make sure nothing bad happens */
1335 	if (hdev->asic_prop.dynamic_fw_load) {
1336 		pre_fw_load = &fw_loader->pre_fw_load;
1337 		cpu_timeout = fw_loader->cpu_timeout;
1338 		cpu_boot_status_reg = pre_fw_load->cpu_boot_status_reg;
1339 
1340 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1341 				COMMS_GOTO_WFE, 0, false, cpu_timeout);
1342 		if (rc) {
1343 			dev_err(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
1344 		} else {
1345 			rc = hl_poll_timeout(
1346 				hdev,
1347 				cpu_boot_status_reg,
1348 				status,
1349 				status == CPU_BOOT_STATUS_IN_WFE,
1350 				hdev->fw_poll_interval_usec,
1351 				cpu_timeout);
1352 			if (rc)
1353 				dev_err(hdev->dev, "Current status=%u. Timed-out updating to WFE\n",
1354 						status);
1355 		}
1356 	} else {
1357 		static_loader = &hdev->fw_loader.static_loader;
1358 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
1359 		msleep(static_loader->cpu_reset_wait_msec);
1360 
1361 		/* Must clear this register in order to prevent preboot
1362 		 * from reading WFE after reboot
1363 		 */
1364 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
1365 	}
1366 
1367 	hdev->device_cpu_is_halted = true;
1368 }
1369 
1370 static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
1371 {
1372 	/* Some of the status codes below are deprecated in newer f/w
1373 	 * versions but we keep them here for backward compatibility
1374 	 */
1375 	switch (status) {
1376 	case CPU_BOOT_STATUS_NA:
1377 		dev_err(hdev->dev,
1378 			"Device boot progress - BTL/ROM did NOT run\n");
1379 		break;
1380 	case CPU_BOOT_STATUS_IN_WFE:
1381 		dev_err(hdev->dev,
1382 			"Device boot progress - Stuck inside WFE loop\n");
1383 		break;
1384 	case CPU_BOOT_STATUS_IN_BTL:
1385 		dev_err(hdev->dev,
1386 			"Device boot progress - Stuck in BTL\n");
1387 		break;
1388 	case CPU_BOOT_STATUS_IN_PREBOOT:
1389 		dev_err(hdev->dev,
1390 			"Device boot progress - Stuck in Preboot\n");
1391 		break;
1392 	case CPU_BOOT_STATUS_IN_SPL:
1393 		dev_err(hdev->dev,
1394 			"Device boot progress - Stuck in SPL\n");
1395 		break;
1396 	case CPU_BOOT_STATUS_IN_UBOOT:
1397 		dev_err(hdev->dev,
1398 			"Device boot progress - Stuck in u-boot\n");
1399 		break;
1400 	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
1401 		dev_err(hdev->dev,
1402 			"Device boot progress - DRAM initialization failed\n");
1403 		break;
1404 	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
1405 		dev_err(hdev->dev,
1406 			"Device boot progress - Cannot boot\n");
1407 		break;
1408 	case CPU_BOOT_STATUS_TS_INIT_FAIL:
1409 		dev_err(hdev->dev,
1410 			"Device boot progress - Thermal Sensor initialization failed\n");
1411 		break;
1412 	case CPU_BOOT_STATUS_SECURITY_READY:
1413 		dev_err(hdev->dev,
1414 			"Device boot progress - Stuck in preboot after security initialization\n");
1415 		break;
1416 	case CPU_BOOT_STATUS_FW_SHUTDOWN_PREP:
1417 		dev_err(hdev->dev,
1418 			"Device boot progress - Stuck in preparation for shutdown\n");
1419 		break;
1420 	default:
1421 		dev_err(hdev->dev,
1422 			"Device boot progress - Invalid or unexpected status code %d\n", status);
1423 		break;
1424 	}
1425 }
1426 
1427 int hl_fw_wait_preboot_ready(struct hl_device *hdev)
1428 {
1429 	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
1430 	u32 status = 0, timeout;
1431 	int rc, tries = 1;
1432 	bool preboot_still_runs;
1433 
1434 	/* Need to check two possible scenarios:
1435 	 *
1436 	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
1437 	 * the preboot is waiting for the boot fit
1438 	 *
1439 	 * All other status values - for older firmwares where the uboot was
1440 	 * loaded from the FLASH
1441 	 */
1442 	timeout = pre_fw_load->wait_for_preboot_timeout;
1443 retry:
1444 	rc = hl_poll_timeout(
1445 		hdev,
1446 		pre_fw_load->cpu_boot_status_reg,
1447 		status,
1448 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
1449 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
1450 		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
1451 		hdev->fw_poll_interval_usec,
1452 		timeout);
1453 	/*
1454 	 * If the F/W reports "security-ready" it means preboot might take longer.
1455 	 * If the field 'wait_for_preboot_extended_timeout' is non-zero we wait
1456 	 * again with that timeout.
1457 	 */
1458 	preboot_still_runs = (status == CPU_BOOT_STATUS_SECURITY_READY ||
1459 				status == CPU_BOOT_STATUS_IN_PREBOOT ||
1460 				status == CPU_BOOT_STATUS_FW_SHUTDOWN_PREP ||
1461 				status == CPU_BOOT_STATUS_DRAM_RDY);
1462 
1463 	if (rc && tries && preboot_still_runs) {
1464 		tries--;
1465 		if (pre_fw_load->wait_for_preboot_extended_timeout) {
1466 			timeout = pre_fw_load->wait_for_preboot_extended_timeout;
1467 			goto retry;
1468 		}
1469 	}
1470 
1471 	if (rc) {
1472 		detect_cpu_boot_status(hdev, status);
1473 		dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
1474 
1475 		/* If we read all FF, then something is totally wrong and there
1476 		 * is no point in reading specific errors
1477 		 */
1478 		if (status != -1)
1479 			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
1480 						pre_fw_load->boot_err1_reg,
1481 						pre_fw_load->sts_boot_dev_sts0_reg,
1482 						pre_fw_load->sts_boot_dev_sts1_reg);
1483 		return -EIO;
1484 	}
1485 
1486 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
1487 
1488 	return 0;
1489 }
1490 
1491 static int hl_fw_read_preboot_caps(struct hl_device *hdev)
1492 {
1493 	struct pre_fw_load_props *pre_fw_load;
1494 	struct asic_fixed_properties *prop;
1495 	u32 reg_val;
1496 	int rc;
1497 
1498 	prop = &hdev->asic_prop;
1499 	pre_fw_load = &hdev->fw_loader.pre_fw_load;
1500 
1501 	rc = hl_fw_wait_preboot_ready(hdev);
1502 	if (rc)
1503 		return rc;
1504 
1505 	/*
1506 	 * The DEV_STS* registers contain FW capabilities/features.
1507 	 * We can rely on these registers only if the CPU_BOOT_DEV_STS*_ENABLED
1508 	 * bit is set.
1509 	 * On the first read of such a register we store its value ONLY if the
1510 	 * register is enabled (which will be propagated to the next stages)
1511 	 * and also mark the register as valid.
1512 	 * In case it is not enabled, the stored value is left as 0, i.e. all
1513 	 * caps/features are off.
1514 	 */
1515 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
1516 	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
1517 		prop->fw_cpu_boot_dev_sts0_valid = true;
1518 		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
1519 	}
1520 
1521 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
1522 	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
1523 		prop->fw_cpu_boot_dev_sts1_valid = true;
1524 		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
1525 	}
1526 
1527 	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
1528 						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
1529 
1530 	/* initialize FW loader once we know what load protocol is used */
1531 	hdev->asic_funcs->init_firmware_loader(hdev);
1532 
1533 	dev_dbg(hdev->dev, "Attempting %s FW load\n",
1534 			prop->dynamic_fw_load ? "dynamic" : "legacy");
1535 	return 0;
1536 }
1537 
1538 static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
1539 					enum hl_fw_component fwc)
1540 {
1541 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1542 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1543 	struct static_fw_load_mgr *static_loader;
1544 	char *dest, *boot_ver, *preboot_ver;
1545 	u32 ver_off, limit;
1546 	const char *name;
1547 	char btl_ver[32];
1548 
1549 	static_loader = &hdev->fw_loader.static_loader;
1550 
1551 	switch (fwc) {
1552 	case FW_COMP_BOOT_FIT:
1553 		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
1554 		dest = prop->uboot_ver;
1555 		name = "Boot-fit";
1556 		limit = static_loader->boot_fit_version_max_off;
1557 		break;
1558 	case FW_COMP_PREBOOT:
1559 		ver_off = RREG32(static_loader->preboot_version_offset_reg);
1560 		dest = prop->preboot_ver;
1561 		name = "Preboot";
1562 		limit = static_loader->preboot_version_max_off;
1563 		break;
1564 	default:
1565 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
1566 		return -EIO;
1567 	}
1568 
1569 	ver_off &= static_loader->sram_offset_mask;
1570 
1571 	if (ver_off < limit) {
1572 		memcpy_fromio(dest,
1573 			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
1574 			VERSION_MAX_LEN);
1575 	} else {
1576 		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
1577 								name, ver_off);
1578 		strscpy(dest, "unavailable", VERSION_MAX_LEN);
1579 		return -EIO;
1580 	}
1581 
1582 	if (fwc == FW_COMP_BOOT_FIT) {
1583 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
1584 		if (boot_ver) {
1585 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
1586 			kfree(boot_ver);
1587 		}
1588 	} else if (fwc == FW_COMP_PREBOOT) {
1589 		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
1590 						VERSION_MAX_LEN);
1591 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
1592 			strscpy(btl_ver, prop->preboot_ver,
1593 				min((int) (preboot_ver - prop->preboot_ver),
1594 									31));
1595 			dev_info(hdev->dev, "%s\n", btl_ver);
1596 		}
1597 
1598 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
1599 		if (preboot_ver) {
1600 			dev_info(hdev->dev, "preboot version %s\n",
1601 								preboot_ver);
1602 			kfree(preboot_ver);
1603 		}
1604 	}
1605 
1606 	return 0;
1607 }
1608 
1609 /**
1610  * hl_fw_preboot_update_state - update internal data structures during
1611  *                              handshake with preboot
1612  *
1614  * @hdev: pointer to the habanalabs device structure
1617  */
1618 static void hl_fw_preboot_update_state(struct hl_device *hdev)
1619 {
1620 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1621 	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
1622 
1623 	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
1624 	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
1625 
1626 	/* We read boot_dev_sts registers multiple times during boot:
1627 	 * 1. preboot - a. Check whether the security status bits are valid
1628 	 *              b. Check whether fw security is enabled
1629 	 *              c. Check whether hard reset is done by preboot
1630 	 * 2. boot cpu - a. Fetch boot cpu security status
1631 	 *               b. Check whether hard reset is done by boot cpu
1632 	 * 3. FW application - a. Fetch fw application security status
1633 	 *                     b. Check whether hard reset is done by fw app
1634 	 */
1635 	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
1636 
1637 	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
1638 
1639 	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
1640 							cpu_boot_dev_sts0);
1641 
1642 	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
1643 							cpu_boot_dev_sts1);
1644 
1645 	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
1646 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
1647 
1648 	dev_dbg(hdev->dev, "firmware-level security is %s\n",
1649 			prop->fw_security_enabled ? "enabled" : "disabled");
1650 
1651 	dev_dbg(hdev->dev, "GIC controller is %s\n",
1652 			prop->gic_interrupts_enable ? "enabled" : "disabled");
1653 }
1654 
1655 static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
1656 {
1657 	int rc;
1658 
1659 	rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
1660 	if (rc)
1661 		return rc;
1662 
1663 	return 0;
1664 }
1665 
1666 int hl_fw_read_preboot_status(struct hl_device *hdev)
1667 {
1668 	int rc;
1669 
1670 	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
1671 		return 0;
1672 
1673 	/* get FW pre-load parameters  */
1674 	hdev->asic_funcs->init_firmware_preload_params(hdev);
1675 
1676 	/*
1677 	 * In order to determine boot method (static VS dynamic) we need to
1678 	 * read the boot caps register
1679 	 */
1680 	rc = hl_fw_read_preboot_caps(hdev);
1681 	if (rc)
1682 		return rc;
1683 
1684 	hl_fw_preboot_update_state(hdev);
1685 
1686 	/* no need to read preboot status in dynamic load */
1687 	if (hdev->asic_prop.dynamic_fw_load)
1688 		return 0;
1689 
1690 	return hl_fw_static_read_preboot_status(hdev);
1691 }
1692 
1693 /* associate string with COMM status */
1694 static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
1695 	[COMMS_STS_NOOP] = "NOOP",
1696 	[COMMS_STS_ACK] = "ACK",
1697 	[COMMS_STS_OK] = "OK",
1698 	[COMMS_STS_ERR] = "ERR",
1699 	[COMMS_STS_VALID_ERR] = "VALID_ERR",
1700 	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
1701 };
1702 
1703 /**
1704  * hl_fw_dynamic_report_error_status - report error status
1705  *
1706  * @hdev: pointer to the habanalabs device structure
1707  * @status: value of FW status register
1708  * @expected_status: the expected status
1709  */
1710 static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
1711 						u32 status,
1712 						enum comms_sts expected_status)
1713 {
1714 	enum comms_sts comm_status =
1715 				FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1716 
1717 	if (comm_status < COMMS_STS_INVLD_LAST)
1718 		dev_err(hdev->dev, "Device status %s, expected status: %s\n",
1719 				hl_dynamic_fw_status_str[comm_status],
1720 				hl_dynamic_fw_status_str[expected_status]);
1721 	else
1722 		dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
1723 				comm_status,
1724 				hl_dynamic_fw_status_str[expected_status]);
1725 }
1726 
1727 /**
1728  * hl_fw_dynamic_send_cmd - send LKD to FW cmd
1729  *
1730  * @hdev: pointer to the habanalabs device structure
1731  * @fw_loader: managing structure for loading device's FW
1732  * @cmd: LKD to FW cmd code
1733  * @size: size of next FW component to be loaded (0 if not necessary)
1734  *
1735  * The exact LKD to FW command layout is defined in struct comms_command.
1736  * note: the size argument is used only when the next FW component should be
1737  *       loaded, otherwise it shall be 0. The size is used by the FW in later
1738  *       protocol stages; when sent here it only indicates the amount of memory
1739  *       to be allocated by the FW to receive the next boot component.
1740  */
1741 static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
1742 				struct fw_load_mgr *fw_loader,
1743 				enum comms_cmd cmd, unsigned int size)
1744 {
1745 	struct cpu_dyn_regs *dyn_regs;
1746 	u32 val;
1747 
1748 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1749 
1750 	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
1751 	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
1752 
1753 	trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1754 	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
1755 }
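
/*
 * Illustrative (hypothetical) invocation of the helper above;
 * 'next_image_size' is a placeholder for the size of the boot component that
 * the F/W should prepare to receive:
 *
 *	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_PREP_DESC, next_image_size);
 */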
1756 
1757 /**
1758  * hl_fw_dynamic_extract_fw_response - update the FW response
1759  *
1760  * @hdev: pointer to the habanalabs device structure
1761  * @fw_loader: managing structure for loading device's FW
1762  * @response: FW response
1763  * @status: the status read from CPU status register
1764  *
1765  * @return 0 on success, otherwise non-zero error code
1766  */
1767 static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
1768 						struct fw_load_mgr *fw_loader,
1769 						struct fw_response *response,
1770 						u32 status)
1771 {
1772 	response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
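	/*
	 * The offset field of the status register is reported in aligned
	 * units; shift it left by COMMS_STATUS_OFFSET_ALIGN_SHIFT to convert
	 * it to a byte offset within the reported RAM.
	 */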
1773 	response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
1774 						COMMS_STATUS_OFFSET_ALIGN_SHIFT;
1775 	response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
1776 
1777 	if ((response->ram_type != COMMS_SRAM) &&
1778 					(response->ram_type != COMMS_DRAM)) {
1779 		dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
1780 							response->ram_type);
1781 		return -EIO;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 /**
1788  * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
1789  *
1790  * @hdev: pointer to the habanalabs device structure
1791  * @fw_loader: managing structure for loading device's FW
1792  * @expected_status: expected status to wait for
1793  * @timeout: timeout for status wait
1794  *
1795  * @return 0 on success, otherwise non-zero error code
1796  *
 * waiting for a status from the FW includes polling the FW status register
 * until the expected status is received or a timeout occurs (whichever comes first).
1799  */
1800 static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
1801 						struct fw_load_mgr *fw_loader,
1802 						enum comms_sts expected_status,
1803 						u32 timeout)
1804 {
1805 	struct cpu_dyn_regs *dyn_regs;
1806 	u32 status;
1807 	int rc;
1808 
1809 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1810 
1811 	trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
1812 
1813 	/* Wait for expected status */
1814 	rc = hl_poll_timeout(
1815 		hdev,
1816 		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
1817 		status,
1818 		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
1819 		hdev->fw_comms_poll_interval_usec,
1820 		timeout);
1821 
1822 	if (rc) {
1823 		hl_fw_dynamic_report_error_status(hdev, status,
1824 							expected_status);
1825 		return -EIO;
1826 	}
1827 
1828 	trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
1829 
1830 	/*
1831 	 * skip storing FW response for NOOP to preserve the actual desired
1832 	 * FW status
1833 	 */
1834 	if (expected_status == COMMS_STS_NOOP)
1835 		return 0;
1836 
1837 	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
1838 					&fw_loader->dynamic_loader.response,
1839 					status);
1840 	return rc;
1841 }
1842 
1843 /**
1844  * hl_fw_dynamic_send_clear_cmd - send clear command to FW
1845  *
1846  * @hdev: pointer to the habanalabs device structure
1847  * @fw_loader: managing structure for loading device's FW
1848  *
1849  * @return 0 on success, otherwise non-zero error code
1850  *
 * after a command cycle between the LKD and the FW CPU (i.e. the LKD got the
 * expected status from the FW) we need to clear the CPU status register in
 * order to avoid garbage between command cycles.
 * This is done by sending the clear command and polling the CPU to LKD status
 * register until it holds the NOOP status
1856  */
1857 static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
1858 						struct fw_load_mgr *fw_loader)
1859 {
1860 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
1861 
1862 	return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
1863 							fw_loader->cpu_timeout);
1864 }
1865 
1866 /**
1867  * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
1868  *
1869  * @hdev: pointer to the habanalabs device structure
1870  * @fw_loader: managing structure for loading device's FW
1871  * @cmd: LKD to FW cmd code
1872  * @size: size of next FW component to be loaded (0 if not necessary)
1873  * @wait_ok: if true also wait for OK response from FW
1874  * @timeout: timeout for status wait
1875  *
1876  * @return 0 on success, otherwise non-zero error code
1877  *
1878  * brief:
 * when sending a protocol command we have the following steps:
1880  * - send clear (clear command and verify clear status register)
1881  * - send the actual protocol command
1882  * - wait for ACK on the protocol command
1883  * - send clear
1884  * - send NOOP
1885  * if, in addition, the specific protocol command should wait for OK then:
1886  * - wait for OK
1887  * - send clear
1888  * - send NOOP
1889  *
1890  * NOTES:
 * send clear: this is necessary in order to clear the status register and
 *             avoid leftovers between commands
 * NOOP command: necessary to avoid a loop on the clear command by the FW
1894  */
1895 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
1896 				struct fw_load_mgr *fw_loader,
1897 				enum comms_cmd cmd, unsigned int size,
1898 				bool wait_ok, u32 timeout)
1899 {
1900 	int rc;
1901 
1902 	trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
1903 
1904 	/* first send clear command to clean former commands */
1905 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1906 	if (rc)
1907 		return rc;
1908 
1909 	/* send the actual command */
1910 	hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
1911 
1912 	/* wait for ACK for the command */
1913 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
1914 								timeout);
1915 	if (rc)
1916 		return rc;
1917 
1918 	/* clear command to prepare for NOOP command */
1919 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1920 	if (rc)
1921 		return rc;
1922 
1923 	/* send the actual NOOP command */
1924 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1925 
1926 	if (!wait_ok)
1927 		return 0;
1928 
1929 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
1930 								timeout);
1931 	if (rc)
1932 		return rc;
1933 
1934 	/* clear command to prepare for NOOP command */
1935 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1936 	if (rc)
1937 		return rc;
1938 
1939 	/* send the actual NOOP command */
1940 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1941 
1942 	return 0;
1943 }
1944 
1945 /**
1946  * hl_fw_compat_crc32 - CRC compatible with FW
1947  *
1948  * @data: pointer to the data
1949  * @size: size of the data
1950  *
1951  * @return the CRC32 result
1952  *
 * NOTE: the kernel's CRC32 differs from the standard CRC32 calculation.
 *       in order to be aligned we need to flip the bits of both the input
 *       initial CRC and the kernel's CRC32 result.
 *       in addition, both sides use an initial CRC of 0.
1957  */
1958 static u32 hl_fw_compat_crc32(u8 *data, size_t size)
1959 {
1960 	return ~crc32_le(~((u32)0), data, size);
1961 }
1962 
1963 /**
1964  * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
1965  *                                        transfer (image or descriptor) between
1966  *                                        host and FW
1967  *
1968  * @hdev: pointer to the habanalabs device structure
1969  * @addr: device address of memory transfer
1970  * @size: memory transfer size
1971  * @region: PCI memory region
1972  *
1973  * @return 0 on success, otherwise non-zero error code
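 *
 * Illustrative example (arbitrary numbers): for a region with
 * region_base = 0x20000000, region_size = 0x10000000, offset_in_bar = 0 and
 * bar_size = 0x8000000, a 0x1000 byte transfer to addr = 0x20001000 passes
 * both checks, while the same transfer to addr = 0x27fff800 fits in the
 * region but crosses the BAR bound and is rejected.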
1974  */
1975 static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
1976 						u64 addr, size_t size,
1977 						struct pci_mem_region *region)
1978 {
1979 	u64 end_addr;
1980 
1981 	/* now make sure that the memory transfer is within region's bounds */
1982 	end_addr = addr + size;
1983 	if (end_addr >= region->region_base + region->region_size) {
1984 		dev_err(hdev->dev,
1985 			"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
1986 							end_addr);
1987 		return -EIO;
1988 	}
1989 
1990 	/*
1991 	 * now make sure memory transfer is within predefined BAR bounds.
1992 	 * this is to make sure we do not need to set the bar (e.g. for DRAM
1993 	 * memory transfers)
1994 	 */
1995 	if (end_addr >= region->region_base - region->offset_in_bar +
1996 							region->bar_size) {
1997 		dev_err(hdev->dev,
1998 			"FW image beyond PCI BAR bounds\n");
1999 		return -EIO;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
2005 /**
2006  * hl_fw_dynamic_validate_descriptor - validate FW descriptor
2007  *
2008  * @hdev: pointer to the habanalabs device structure
2009  * @fw_loader: managing structure for loading device's FW
2010  * @fw_desc: the descriptor from FW
2011  *
2012  * @return 0 on success, otherwise non-zero error code
2013  */
2014 static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
2015 					struct fw_load_mgr *fw_loader,
2016 					struct lkd_fw_comms_desc *fw_desc)
2017 {
2018 	struct pci_mem_region *region;
2019 	enum pci_region region_id;
2020 	size_t data_size;
2021 	u32 data_crc32;
2022 	u8 *data_ptr;
2023 	u64 addr;
2024 	int rc;
2025 
2026 	if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
2027 		dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
				le32_to_cpu(fw_desc->header.magic));
2029 
2030 	if (fw_desc->header.version != HL_COMMS_DESC_VER)
2031 		dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
2032 				fw_desc->header.version);
2033 
2034 	/*
	 * Calc CRC32 of the data without the header. Use the size of the
	 * descriptor reported by the firmware, without calculating it ourselves,
	 * to allow adding more fields to the lkd_fw_comms_desc structure.
	 * Note that there are no alignment/stride address issues here as all
	 * structures are 64-bit padded.
2040 	 */
2041 	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
2042 	data_size = le16_to_cpu(fw_desc->header.size);
2043 
2044 	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
2045 	if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
2046 		dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
			data_crc32, le32_to_cpu(fw_desc->header.crc32));
2048 		return -EIO;
2049 	}
2050 
2051 	/* find memory region to which to copy the image */
2052 	addr = le64_to_cpu(fw_desc->img_addr);
2053 	region_id = hl_get_pci_memory_region(hdev, addr);
	if ((region_id != PCI_REGION_SRAM) && (region_id != PCI_REGION_DRAM)) {
2055 		dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
2056 		return -EIO;
2057 	}
2058 
2059 	region = &hdev->pci_mem_region[region_id];
2060 
2061 	/* store the region for the copy stage */
2062 	fw_loader->dynamic_loader.image_region = region;
2063 
2064 	/*
2065 	 * here we know that the start address is valid, now make sure that the
2066 	 * image is within region's bounds
2067 	 */
2068 	rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
2069 					fw_loader->dynamic_loader.fw_image_size,
2070 					region);
2071 	if (rc) {
2072 		dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
2073 		return rc;
2074 	}
2075 
2076 	/* here we can mark the descriptor as valid as the content has been validated */
2077 	fw_loader->dynamic_loader.fw_desc_valid = true;
2078 
2079 	return 0;
2080 }
2081 
2082 static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
2083 						struct fw_response *response,
2084 						struct pci_mem_region *region)
2085 {
2086 	u64 device_addr;
2087 	int rc;
2088 
2089 	device_addr = region->region_base + response->ram_offset;
2090 
2091 	/*
2092 	 * validate that the descriptor is within region's bounds
	 * Note that as the start address was supplied according to the RAM
	 * type, testing only the end address is enough
2095 	 */
2096 	rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
2097 					sizeof(struct lkd_fw_comms_desc),
2098 					region);
2099 	return rc;
2100 }
2101 
2102 /*
2103  * hl_fw_dynamic_read_descriptor_msg - read and show the ascii msg that sent by fw
2104  *
2105  * @hdev: pointer to the habanalabs device structure
2106  * @fw_desc: the descriptor from FW
2107  */
2108 static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
2109 					struct lkd_fw_comms_desc *fw_desc)
2110 {
2111 	int i;
2112 	char *msg;
2113 
2114 	for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
2115 		if (!fw_desc->ascii_msg[i].valid)
2116 			return;
2117 
2118 		/* force NULL termination */
2119 		msg = fw_desc->ascii_msg[i].msg;
2120 		msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
2121 
2122 		switch (fw_desc->ascii_msg[i].msg_lvl) {
2123 		case LKD_FW_ASCII_MSG_ERR:
2124 			dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2125 			break;
2126 		case LKD_FW_ASCII_MSG_WRN:
2127 			dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2128 			break;
2129 		case LKD_FW_ASCII_MSG_INF:
2130 			dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2131 			break;
2132 		default:
2133 			dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
2134 			break;
2135 		}
2136 	}
2137 }
2138 
2139 /**
2140  * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
2141  *
2142  * @hdev: pointer to the habanalabs device structure
2143  * @fw_loader: managing structure for loading device's FW
2144  *
2145  * @return 0 on success, otherwise non-zero error code
2146  */
2147 static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
2148 						struct fw_load_mgr *fw_loader)
2149 {
2150 	struct lkd_fw_comms_desc *fw_desc;
2151 	struct pci_mem_region *region;
2152 	struct fw_response *response;
2153 	void *temp_fw_desc;
2154 	void __iomem *src;
2155 	u16 fw_data_size;
2156 	enum pci_region region_id;
2157 	int rc;
2158 
2159 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2160 	response = &fw_loader->dynamic_loader.response;
2161 
2162 	region_id = (response->ram_type == COMMS_SRAM) ?
2163 					PCI_REGION_SRAM : PCI_REGION_DRAM;
2164 
2165 	region = &hdev->pci_mem_region[region_id];
2166 
2167 	rc = hl_fw_dynamic_validate_response(hdev, response, region);
2168 	if (rc) {
2169 		dev_err(hdev->dev,
2170 			"invalid mem transfer request for FW descriptor\n");
2171 		return rc;
2172 	}
2173 
2174 	/*
	 * extract the address to copy the descriptor from.
	 * in addition, as the descriptor value is going to be overridden by new
	 * data, we mark it as invalid. it will be marked as valid again once
	 * validated.
2179 	 */
2180 	fw_loader->dynamic_loader.fw_desc_valid = false;
2181 	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2182 							response->ram_offset;
2183 
2184 	/*
	 * We copy the FW descriptor in 2 phases:
	 * 1. copy the header + data info according to our lkd_fw_comms_desc definition.
	 *    then we're able to read the actual data size provided by the FW.
	 *    this is needed for cases where data in the descriptor was changed (added/removed)
	 *    in the embedded specs header file before updating the LKD copy of the header file.
	 * 2. copy the descriptor to a temporary buffer of the reported size and send it for validation
2191 	 */
2192 	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
2193 	fw_data_size = le16_to_cpu(fw_desc->header.size);
2194 
2195 	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
2196 	if (!temp_fw_desc)
2197 		return -ENOMEM;
2198 
2199 	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
2200 
2201 	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
2202 					(struct lkd_fw_comms_desc *) temp_fw_desc);
2203 
2204 	if (!rc)
2205 		hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
2206 
2207 	vfree(temp_fw_desc);
2208 
2209 	return rc;
2210 }
2211 
2212 /**
2213  * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
2214  *
2215  * @hdev: pointer to the habanalabs device structure
2216  * @fw_loader: managing structure for loading device's FW
2217  * @next_image_size: size to allocate for next FW component
2218  *
2219  * @return 0 on success, otherwise non-zero error code
2220  */
2221 static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
2222 						struct fw_load_mgr *fw_loader,
2223 						size_t next_image_size)
2224 {
2225 	int rc;
2226 
2227 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
2228 						next_image_size, true,
2229 						fw_loader->cpu_timeout);
2230 	if (rc)
2231 		return rc;
2232 
2233 	return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
2234 }
2235 
2236 /**
2237  * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
2238  *
2239  * @hdev: pointer to the habanalabs device structure
2240  * @fwc: the firmware component
 * @fw_version: fw component's version string
 *
 * @return 0 on success, otherwise non-zero error code
 */
2243 static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
2244 					enum hl_fw_component fwc,
2245 					const char *fw_version)
2246 {
2247 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2248 	char *preboot_ver, *boot_ver;
2249 	char btl_ver[32];
2250 	int rc;
2251 
2252 	switch (fwc) {
2253 	case FW_COMP_BOOT_FIT:
2254 		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
2255 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
2256 		if (boot_ver) {
2257 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
2258 			kfree(boot_ver);
2259 		}
2260 
2261 		break;
2262 	case FW_COMP_PREBOOT:
2263 		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
2264 		preboot_ver = strnstr(prop->preboot_ver, "Preboot", VERSION_MAX_LEN);
2265 		dev_info(hdev->dev, "preboot full version: '%s'\n", preboot_ver);
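		/*
		 * The characters preceding the "Preboot" substring are treated
		 * here as the boot loader (BTL) version; they are copied into
		 * btl_ver and printed separately below.
		 */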
2266 
2267 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
2268 			strscpy(btl_ver, prop->preboot_ver,
2269 				min((int) (preboot_ver - prop->preboot_ver), 31));
2270 			dev_info(hdev->dev, "%s\n", btl_ver);
2271 		}
2272 
2273 		rc = hl_get_sw_major_minor_subminor(hdev, preboot_ver);
2274 		if (rc)
2275 			return rc;
2276 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
2277 		if (preboot_ver) {
2278 			rc = hl_get_preboot_major_minor(hdev, preboot_ver);
2279 			kfree(preboot_ver);
2280 			if (rc)
2281 				return rc;
2282 		}
2283 
2284 		break;
2285 	default:
2286 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2287 		return -EINVAL;
2288 	}
2289 
2290 	return 0;
2291 }
2292 
2293 /**
2294  * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
2295  *
2296  * @hdev: pointer to the habanalabs device structure
2297  * @fw: fw descriptor
2298  * @fw_loader: managing structure for loading device's FW
2299  */
2300 static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
2301 						const struct firmware *fw,
2302 						struct fw_load_mgr *fw_loader)
2303 {
2304 	struct lkd_fw_comms_desc *fw_desc;
2305 	struct pci_mem_region *region;
2306 	void __iomem *dest;
2307 	u64 addr;
2308 	int rc;
2309 
2310 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2311 	addr = le64_to_cpu(fw_desc->img_addr);
2312 
2313 	/* find memory region to which to copy the image */
2314 	region = fw_loader->dynamic_loader.image_region;
2315 
2316 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2317 					(addr - region->region_base);
2318 
2319 	rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
2320 					fw_loader->boot_fit_img.src_off,
2321 					fw_loader->boot_fit_img.copy_size);
2322 
2323 	return rc;
2324 }
2325 
2326 /**
2327  * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
2328  *
2329  * @hdev: pointer to the habanalabs device structure
2330  * @msg: message
2331  * @fw_loader: managing structure for loading device's FW
2332  */
2333 static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
2334 		struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
2335 {
2336 	struct lkd_fw_comms_desc *fw_desc;
2337 	struct pci_mem_region *region;
2338 	void __iomem *dest;
2339 	u64 addr;
2340 	int rc;
2341 
2342 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2343 	addr = le64_to_cpu(fw_desc->img_addr);
2344 
2345 	/* find memory region to which to copy the image */
2346 	region = fw_loader->dynamic_loader.image_region;
2347 
2348 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2349 					(addr - region->region_base);
2350 
2351 	rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
2352 
2353 	return rc;
2354 }
2355 
2356 /**
2357  * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
2358  *                               is loaded
2359  *
2360  * @hdev: pointer to the habanalabs device structure
2361  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2362  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2363  *
2364  * @return 0 on success, otherwise non-zero error code
2365  */
2366 static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
2367 						u32 cpu_boot_dev_sts0_reg,
2368 						u32 cpu_boot_dev_sts1_reg)
2369 {
2370 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2371 
2372 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
2373 
2374 	/* Read boot_cpu status bits */
2375 	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
2376 		prop->fw_bootfit_cpu_boot_dev_sts0 =
2377 				RREG32(cpu_boot_dev_sts0_reg);
2378 
2379 		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
2380 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2381 
2382 		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
2383 					prop->fw_bootfit_cpu_boot_dev_sts0);
2384 	}
2385 
2386 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2387 		prop->fw_bootfit_cpu_boot_dev_sts1 =
2388 				RREG32(cpu_boot_dev_sts1_reg);
2389 
2390 		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
2391 					prop->fw_bootfit_cpu_boot_dev_sts1);
2392 	}
2393 
2394 	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
2395 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2396 }
2397 
2398 static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
2399 {
2400 	struct cpu_dyn_regs *dyn_regs =
2401 			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
2402 
2403 	/* Check whether all 3 interrupt interfaces are set, if not use a
2404 	 * single interface
2405 	 */
2406 	if (!hdev->asic_prop.gic_interrupts_enable &&
2407 			!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
2408 				CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
2409 		dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
2410 		dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
2411 
		dev_warn(hdev->dev,
			"Using a single interrupt interface towards cpucp\n");
2414 	}
2415 }
2416 /**
2417  * hl_fw_dynamic_load_image - load FW image using dynamic protocol
2418  *
2419  * @hdev: pointer to the habanalabs device structure
2420  * @fw_loader: managing structure for loading device's FW
2421  * @load_fwc: the FW component to be loaded
2422  * @img_ld_timeout: image load timeout
2423  *
2424  * @return 0 on success, otherwise non-zero error code
2425  */
2426 static int hl_fw_dynamic_load_image(struct hl_device *hdev,
2427 						struct fw_load_mgr *fw_loader,
2428 						enum hl_fw_component load_fwc,
2429 						u32 img_ld_timeout)
2430 {
2431 	enum hl_fw_component cur_fwc;
2432 	const struct firmware *fw;
2433 	char *fw_name;
2434 	int rc = 0;
2435 
2436 	/*
	 * when loading an image we have one of 2 scenarios:
	 * 1. the current FW component is preboot and we want to load the boot-fit
	 * 2. the current FW component is boot-fit and we want to load Linux
2440 	 */
2441 	if (load_fwc == FW_COMP_BOOT_FIT) {
2442 		cur_fwc = FW_COMP_PREBOOT;
2443 		fw_name = fw_loader->boot_fit_img.image_name;
2444 	} else {
2445 		cur_fwc = FW_COMP_BOOT_FIT;
2446 		fw_name = fw_loader->linux_img.image_name;
2447 	}
2448 
2449 	/* request FW in order to communicate to FW the size to be allocated */
2450 	rc = hl_request_fw(hdev, &fw, fw_name);
2451 	if (rc)
2452 		return rc;
2453 
2454 	/* store the image size for future validation */
2455 	fw_loader->dynamic_loader.fw_image_size = fw->size;
2456 
2457 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
2458 	if (rc)
2459 		goto release_fw;
2460 
	/* read the version of the currently running FW component */
2462 	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
2463 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2464 	if (rc)
2465 		goto release_fw;
2466 
	/* copy the image (boot fit or Linux) to the space allocated by the FW */
2468 	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
2469 	if (rc)
2470 		goto release_fw;
2471 
2472 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2473 						0, true,
2474 						fw_loader->cpu_timeout);
2475 	if (rc)
2476 		goto release_fw;
2477 
2478 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2479 						0, false,
2480 						img_ld_timeout);
2481 
2482 release_fw:
2483 	hl_release_firmware(fw);
2484 	return rc;
2485 }
2486 
2487 static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
2488 					struct fw_load_mgr *fw_loader)
2489 {
2490 	struct dynamic_fw_load_mgr *dyn_loader;
2491 	u32 status;
2492 	int rc;
2493 
2494 	dyn_loader = &fw_loader->dynamic_loader;
2495 
2496 	/*
2497 	 * Make sure CPU boot-loader is running
2498 	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags
2503 	 */
2504 	rc = hl_poll_timeout(
2505 		hdev,
2506 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2507 		status,
2508 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2509 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2510 		hdev->fw_poll_interval_usec,
2511 		dyn_loader->wait_for_bl_timeout);
2512 	if (rc) {
2513 		dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
2514 		return rc;
2515 	}
2516 
2517 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2518 	return 0;
2519 }
2520 
2521 static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
2522 						struct fw_load_mgr *fw_loader)
2523 {
2524 	struct dynamic_fw_load_mgr *dyn_loader;
2525 	u32 status;
2526 	int rc;
2527 
2528 	dyn_loader = &fw_loader->dynamic_loader;
2529 
	/* Make sure CPU Linux is running */
2531 
2532 	rc = hl_poll_timeout(
2533 		hdev,
2534 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2535 		status,
2536 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2537 		hdev->fw_poll_interval_usec,
2538 		fw_loader->cpu_timeout);
2539 	if (rc) {
2540 		dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
2541 		return rc;
2542 	}
2543 
2544 	dev_dbg(hdev->dev, "Boot status = %d\n", status);
2545 	return 0;
2546 }
2547 
2548 /**
2549  * hl_fw_linux_update_state -	update internal data structures after Linux
2550  *				is loaded.
2551  *				Note: Linux initialization is comprised mainly
2552  *				of two stages - loading kernel (SRAM_AVAIL)
2553  *				& loading ARMCP.
2554  *				Therefore reading boot device status in any of
2555  *				these stages might result in different values.
2556  *
2557  * @hdev: pointer to the habanalabs device structure
2558  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2559  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2560  *
2561  * @return 0 on success, otherwise non-zero error code
2562  */
2563 static void hl_fw_linux_update_state(struct hl_device *hdev,
2564 						u32 cpu_boot_dev_sts0_reg,
2565 						u32 cpu_boot_dev_sts1_reg)
2566 {
2567 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2568 
2569 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
2570 
2571 	/* Read FW application security bits */
2572 	if (prop->fw_cpu_boot_dev_sts0_valid) {
2573 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
2574 
2575 		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
2576 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2577 
2578 		if (prop->fw_app_cpu_boot_dev_sts0 &
2579 				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
2580 			prop->gic_interrupts_enable = false;
2581 
2582 		dev_dbg(hdev->dev,
2583 			"Firmware application CPU status0 %#x\n",
2584 			prop->fw_app_cpu_boot_dev_sts0);
2585 
2586 		dev_dbg(hdev->dev, "GIC controller is %s\n",
2587 				prop->gic_interrupts_enable ?
2588 						"enabled" : "disabled");
2589 	}
2590 
2591 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2592 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
2593 
2594 		dev_dbg(hdev->dev,
2595 			"Firmware application CPU status1 %#x\n",
2596 			prop->fw_app_cpu_boot_dev_sts1);
2597 	}
2598 
2599 	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
2600 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2601 
2602 	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2603 }
2604 
2605 /**
2606  * hl_fw_dynamic_send_msg - send a COMMS message with attached data
2607  *
2608  * @hdev: pointer to the habanalabs device structure
2609  * @fw_loader: managing structure for loading device's FW
2610  * @msg_type: message type
2611  * @data: data to be sent
2612  *
2613  * @return 0 on success, otherwise non-zero error code
2614  */
2615 static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
2616 		struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
2617 {
2618 	struct lkd_msg_comms *msg;
2619 	int rc;
2620 
2621 	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
2622 	if (!msg)
2623 		return -ENOMEM;
2624 
2625 	/* create message to be sent */
2626 	msg->header.type = msg_type;
2627 	msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
2628 	msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
2629 
2630 	switch (msg_type) {
2631 	case HL_COMMS_RESET_CAUSE_TYPE:
2632 		msg->reset_cause = *(__u8 *) data;
2633 		break;
2634 
2635 	default:
2636 		dev_err(hdev->dev,
2637 			"Send COMMS message - invalid message type %u\n",
2638 			msg_type);
2639 		rc = -EINVAL;
2640 		goto out;
2641 	}
2642 
2643 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2644 			sizeof(struct lkd_msg_comms));
2645 	if (rc)
2646 		goto out;
2647 
2648 	/* copy message to space allocated by FW */
2649 	rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
2650 	if (rc)
2651 		goto out;
2652 
2653 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2654 						0, true,
2655 						fw_loader->cpu_timeout);
2656 	if (rc)
2657 		goto out;
2658 
2659 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2660 						0, true,
2661 						fw_loader->cpu_timeout);
2662 
2663 out:
2664 	kfree(msg);
2665 	return rc;
2666 }
2667 
2668 /**
2669  * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
2670  *
2671  * @hdev: pointer to the habanalabs device structure
2672  * @fw_loader: managing structure for loading device's FW
2673  *
2674  * @return 0 on success, otherwise non-zero error code
2675  *
 * brief: the dynamic protocol is a master (LKD) / slave (FW CPU) protocol.
 * the communication is done using registers:
 * - LKD command register
 * - FW status register
 * the protocol is race free. this goal is achieved by splitting the requests
 * and responses into known synchronization points between the LKD and the FW.
 * each response to an LKD request is known and bound to a predefined timeout.
 * in case the timeout expires without the desired status from the FW, the
 * protocol (and hence the boot) will fail.
2685  */
2686 static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
2687 					struct fw_load_mgr *fw_loader)
2688 {
2689 	struct cpu_dyn_regs *dyn_regs;
2690 	int rc, fw_error_rc;
2691 
2692 	dev_info(hdev->dev,
2693 		"Loading %sfirmware to device, may take some time...\n",
2694 		hdev->asic_prop.fw_security_enabled ? "secured " : "");
2695 
2696 	/* initialize FW descriptor as invalid */
2697 	fw_loader->dynamic_loader.fw_desc_valid = false;
2698 
2699 	/*
2700 	 * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
2701 	 * It will be updated from FW after hl_fw_dynamic_request_descriptor().
2702 	 */
2703 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2704 
2705 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
2706 						0, true,
2707 						fw_loader->cpu_timeout);
2708 	if (rc)
2709 		goto protocol_err;
2710 
2711 	if (hdev->reset_info.curr_reset_cause) {
2712 		rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
2713 				HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
2714 		if (rc)
2715 			goto protocol_err;
2716 
2717 		/* Clear current reset cause */
2718 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
2719 	}
2720 
2721 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
2722 		struct lkd_fw_binning_info *binning_info;
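
		/*
		 * The boot CPU FW is not loaded by the LKD in this mode; only
		 * fetch the COMMS descriptor to read the preboot version and,
		 * when supported, the binning info and reserved memory size,
		 * then return.
		 */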
2723 
2724 		rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2725 							sizeof(struct lkd_msg_comms));
2726 		if (rc)
2727 			goto protocol_err;
2728 
2729 		/* read preboot version */
2730 		rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
2731 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2732 
2733 		if (rc)
2734 			return rc;
2735 
2736 		/* read binning info from preboot */
2737 		if (hdev->support_preboot_binning) {
2738 			binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
2739 			hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
2740 			hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
2741 			hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
2742 			hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
2743 			hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
2744 
2745 			rc = hdev->asic_funcs->set_dram_properties(hdev);
2746 			if (rc)
2747 				return rc;
2748 
2749 			rc = hdev->asic_funcs->set_binning_masks(hdev);
2750 			if (rc)
2751 				return rc;
2752 
2753 			dev_dbg(hdev->dev,
2754 				"Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
2755 				hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
2756 				hdev->decoder_binning, hdev->rotator_binning);
2757 		}
2758 
2759 		if (hdev->asic_prop.support_dynamic_resereved_fw_size) {
2760 			hdev->asic_prop.reserved_fw_mem_size =
2761 				le32_to_cpu(fw_loader->dynamic_loader.comm_desc.rsvd_mem_size_mb);
2762 		}
2763 
2764 		return 0;
2765 	}
2766 
2767 	/* load boot fit to FW */
2768 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
2769 						fw_loader->boot_fit_timeout);
2770 	if (rc) {
2771 		dev_err(hdev->dev, "failed to load boot fit\n");
2772 		goto protocol_err;
2773 	}
2774 
2775 	rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
2776 	if (rc)
2777 		goto protocol_err;
2778 
2779 	hl_fw_boot_fit_update_state(hdev,
2780 			le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2781 			le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2782 
2783 	/*
2784 	 * when testing FW load (without Linux) on PLDM we don't want to
2785 	 * wait until boot fit is active as it may take several hours.
2786 	 * instead, we load the bootfit and let it do all initialization in
2787 	 * the background.
2788 	 */
2789 	if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
2790 		return 0;
2791 
	/*
	 * Enable DRAM scrambling before Linux boot and after successful
	 * U-Boot.
	 */
2795 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2796 
2797 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2798 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2799 		return 0;
2800 	}
2801 
2802 	if (fw_loader->skip_bmc) {
2803 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
2804 							COMMS_SKIP_BMC, 0,
2805 							true,
2806 							fw_loader->cpu_timeout);
2807 		if (rc) {
			dev_err(hdev->dev, "failed to send skip BMC command\n");
2809 			goto protocol_err;
2810 		}
2811 	}
2812 
2813 	/* load Linux image to FW */
2814 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
2815 							fw_loader->cpu_timeout);
2816 	if (rc) {
2817 		dev_err(hdev->dev, "failed to load Linux\n");
2818 		goto protocol_err;
2819 	}
2820 
2821 	rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
2822 	if (rc)
2823 		goto protocol_err;
2824 
2825 	hl_fw_linux_update_state(hdev,
2826 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2827 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2828 
2829 	hl_fw_dynamic_update_linux_interrupt_if(hdev);
2830 
2831 protocol_err:
2832 	if (fw_loader->dynamic_loader.fw_desc_valid) {
2833 		fw_error_rc = fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
2834 				le32_to_cpu(dyn_regs->cpu_boot_err1),
2835 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2836 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2837 
2838 		if (fw_error_rc)
2839 			return fw_error_rc;
2840 	}
2841 
2842 	return rc;
2843 }
2844 
2845 /**
2846  * hl_fw_static_init_cpu - initialize the device CPU using static protocol
2847  *
2848  * @hdev: pointer to the habanalabs device structure
2849  * @fw_loader: managing structure for loading device's FW
2850  *
2851  * @return 0 on success, otherwise non-zero error code
2852  */
2853 static int hl_fw_static_init_cpu(struct hl_device *hdev,
2854 					struct fw_load_mgr *fw_loader)
2855 {
2856 	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
2857 	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
2858 	struct static_fw_load_mgr *static_loader;
2859 	u32 cpu_boot_status_reg;
2860 	int rc;
2861 
2862 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
2863 		return 0;
2864 
2865 	/* init common loader parameters */
2866 	cpu_timeout = fw_loader->cpu_timeout;
2867 
2868 	/* init static loader parameters */
2869 	static_loader = &fw_loader->static_loader;
2870 	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
2871 	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
2872 	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
2873 	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
2874 	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
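
	/*
	 * Static protocol handshake, as implemented below: wait for the boot
	 * FIT request, load the boot FIT, signal KMD_MSG_FIT_RDY and wait for
	 * CPU_MSG_OK; then wait for the boot loader, load the Linux FW,
	 * optionally request KMD_MSG_SKIP_BMC, signal KMD_MSG_FIT_RDY again
	 * and wait for CPU_BOOT_STATUS_SRAM_AVAIL.
	 */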
2875 
2876 	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
2877 		cpu_timeout / USEC_PER_SEC);
2878 
2879 	/* Wait for boot FIT request */
2880 	rc = hl_poll_timeout(
2881 		hdev,
2882 		cpu_boot_status_reg,
2883 		status,
2884 		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
2885 		hdev->fw_poll_interval_usec,
2886 		fw_loader->boot_fit_timeout);
2887 
2888 	if (rc) {
2889 		dev_dbg(hdev->dev,
2890 			"No boot fit request received (status = %d), resuming boot\n", status);
2891 	} else {
2892 		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
2893 		if (rc)
2894 			goto out;
2895 
2896 		/* Clear device CPU message status */
2897 		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
2898 
2899 		/* Signal device CPU that boot loader is ready */
2900 		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2901 
2902 		/* Poll for CPU device ack */
2903 		rc = hl_poll_timeout(
2904 			hdev,
2905 			cpu_msg_status_reg,
2906 			status,
2907 			status == CPU_MSG_OK,
2908 			hdev->fw_poll_interval_usec,
2909 			fw_loader->boot_fit_timeout);
2910 
2911 		if (rc) {
2912 			dev_err(hdev->dev,
2913 				"Timeout waiting for boot fit load ack (status = %d)\n", status);
2914 			goto out;
2915 		}
2916 
2917 		/* Clear message */
2918 		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2919 	}
2920 
2921 	/*
2922 	 * Make sure CPU boot-loader is running
2923 	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we load u-boot (without Linux),
	 * which at a later stage is relocated to DRAM. In this case we expect
	 * u-boot to set CPU_BOOT_STATUS_SRAM_AVAIL, so we add it to the
	 * poll flags
2928 	 */
2929 	rc = hl_poll_timeout(
2930 		hdev,
2931 		cpu_boot_status_reg,
2932 		status,
2933 		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
2934 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
2935 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2936 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2937 		hdev->fw_poll_interval_usec,
2938 		cpu_timeout);
2939 
2940 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2941 
2942 	/* Read U-Boot version now in case we will later fail */
2943 	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
2944 
2945 	/* update state according to boot stage */
2946 	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
2947 						cpu_boot_dev_status1_reg);
2948 
2949 	if (rc) {
2950 		detect_cpu_boot_status(hdev, status);
2951 		rc = -EIO;
2952 		goto out;
2953 	}
2954 
	/*
	 * Enable DRAM scrambling before Linux boot and after successful
	 * U-Boot.
	 */
2958 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2959 
2960 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2961 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2962 		rc = 0;
2963 		goto out;
2964 	}
2965 
2966 	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
2967 		rc = 0;
2968 		goto out;
2969 	}
2970 
2971 	dev_info(hdev->dev,
2972 		"Loading firmware to device, may take some time...\n");
2973 
2974 	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
2975 	if (rc)
2976 		goto out;
2977 
2978 	if (fw_loader->skip_bmc) {
2979 		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
2980 
2981 		rc = hl_poll_timeout(
2982 			hdev,
2983 			cpu_boot_status_reg,
2984 			status,
2985 			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
2986 			hdev->fw_poll_interval_usec,
2987 			cpu_timeout);
2988 
2989 		if (rc) {
2990 			dev_err(hdev->dev,
2991 				"Failed to get ACK on skipping BMC (status = %d)\n",
2992 				status);
2993 			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2994 			rc = -EIO;
2995 			goto out;
2996 		}
2997 	}
2998 
2999 	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
3000 
3001 	rc = hl_poll_timeout(
3002 		hdev,
3003 		cpu_boot_status_reg,
3004 		status,
3005 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
3006 		hdev->fw_poll_interval_usec,
3007 		cpu_timeout);
3008 
3009 	/* Clear message */
3010 	WREG32(msg_to_cpu_reg, KMD_MSG_NA);
3011 
3012 	if (rc) {
3013 		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
3014 			dev_err(hdev->dev,
3015 				"Device reports FIT image is corrupted\n");
3016 		else
3017 			dev_err(hdev->dev,
3018 				"Failed to load firmware to device (status = %d)\n",
3019 				status);
3020 
3021 		rc = -EIO;
3022 		goto out;
3023 	}
3024 
3025 	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3026 					fw_loader->static_loader.boot_err1_reg,
3027 					cpu_boot_dev_status0_reg,
3028 					cpu_boot_dev_status1_reg);
3029 	if (rc)
3030 		return rc;
3031 
3032 	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
3033 						cpu_boot_dev_status1_reg);
3034 
3035 	return 0;
3036 
3037 out:
3038 	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
3039 					fw_loader->static_loader.boot_err1_reg,
3040 					cpu_boot_dev_status0_reg,
3041 					cpu_boot_dev_status1_reg);
3042 
3043 	return rc;
3044 }
3045 
3046 /**
3047  * hl_fw_init_cpu - initialize the device CPU
3048  *
3049  * @hdev: pointer to the habanalabs device structure
3050  *
3051  * @return 0 on success, otherwise non-zero error code
3052  *
3053  * perform necessary initializations for device's CPU. takes into account if
3054  * init protocol is static or dynamic.
3055  */
3056 int hl_fw_init_cpu(struct hl_device *hdev)
3057 {
3058 	struct asic_fixed_properties *prop = &hdev->asic_prop;
3059 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
3060 
	return prop->dynamic_fw_load ?
3062 			hl_fw_dynamic_init_cpu(hdev, fw_loader) :
3063 			hl_fw_static_init_cpu(hdev, fw_loader);
3064 }
3065 
3066 void hl_fw_set_pll_profile(struct hl_device *hdev)
3067 {
3068 	hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
3069 				hdev->asic_prop.max_freq_value);
3070 }
3071 
3072 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
3073 {
3074 	long value;
3075 
3076 	if (!hl_device_operational(hdev, NULL))
3077 		return -ENODEV;
3078 
3079 	if (!hdev->pdev) {
3080 		*cur_clk = 0;
3081 		*max_clk = 0;
3082 		return 0;
3083 	}
3084 
3085 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
3086 
3087 	if (value < 0) {
3088 		dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
3089 		return value;
3090 	}
3091 
3092 	*max_clk = (value / 1000 / 1000);
3093 
3094 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
3095 
3096 	if (value < 0) {
3097 		dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
3098 		return value;
3099 	}
3100 
3101 	*cur_clk = (value / 1000 / 1000);
3102 
3103 	return 0;
3104 }
3105 
3106 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
3107 {
3108 	struct cpucp_packet pkt;
3109 	u32 used_pll_idx;
3110 	u64 result;
3111 	int rc;
3112 
3113 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3114 	if (rc)
3115 		return rc;
3116 
3117 	memset(&pkt, 0, sizeof(pkt));
3118 
3119 	if (curr)
3120 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
3121 						CPUCP_PKT_CTL_OPCODE_SHIFT);
3122 	else
3123 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3124 
3125 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3126 
3127 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3128 
3129 	if (rc) {
3130 		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
3131 			used_pll_idx, rc);
3132 		return rc;
3133 	}
3134 
3135 	return (long) result;
3136 }
3137 
3138 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
3139 {
3140 	struct cpucp_packet pkt;
3141 	u32 used_pll_idx;
3142 	int rc;
3143 
3144 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
3145 	if (rc)
3146 		return;
3147 
3148 	memset(&pkt, 0, sizeof(pkt));
3149 
3150 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3151 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
3152 	pkt.value = cpu_to_le64(freq);
3153 
3154 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3155 
3156 	if (rc)
3157 		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
3158 			used_pll_idx, rc);
3159 }
3160 
3161 long hl_fw_get_max_power(struct hl_device *hdev)
3162 {
3163 	struct cpucp_packet pkt;
3164 	u64 result;
3165 	int rc;
3166 
3167 	memset(&pkt, 0, sizeof(pkt));
3168 
3169 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3170 
3171 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
3172 
3173 	if (rc) {
3174 		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
3175 		return rc;
3176 	}
3177 
3178 	return result;
3179 }
3180 
3181 void hl_fw_set_max_power(struct hl_device *hdev)
3182 {
3183 	struct cpucp_packet pkt;
3184 	int rc;
3185 
3186 	/* TODO: remove this after simulator supports this packet */
3187 	if (!hdev->pdev)
3188 		return;
3189 
3190 	memset(&pkt, 0, sizeof(pkt));
3191 
3192 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
3193 	pkt.value = cpu_to_le64(hdev->max_power);
3194 
3195 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
3196 
3197 	if (rc)
3198 		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
3199 }
3200 
3201 static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
3202 					u32 nonce, u32 timeout)
3203 {
3204 	struct cpucp_packet pkt = {};
3205 	dma_addr_t req_dma_addr;
3206 	void *req_cpu_addr;
3207 	int rc;
3208 
3209 	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
3210 	if (!req_cpu_addr) {
3211 		dev_err(hdev->dev,
3212 			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
3213 		return -ENOMEM;
3214 	}
3215 
3216 	memset(data, 0, size);
3217 
3218 	pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
3219 	pkt.addr = cpu_to_le64(req_dma_addr);
3220 	pkt.data_max_size = cpu_to_le32(size);
3221 	pkt.nonce = cpu_to_le32(nonce);
3222 
3223 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3224 					timeout, NULL);
3225 	if (rc) {
3226 		dev_err(hdev->dev,
3227 			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
3228 		goto out;
3229 	}
3230 
3231 	memcpy(data, req_cpu_addr, size);
3232 
3233 out:
3234 	hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
3235 
3236 	return rc;
3237 }
3238 
3239 int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
3240 				u32 nonce)
3241 {
3242 	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
3243 					sizeof(struct cpucp_sec_attest_info), nonce,
3244 					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3245 }
3246 
3247 int hl_fw_get_dev_info_signed(struct hl_device *hdev,
3248 			      struct cpucp_dev_info_signed *dev_info_signed, u32 nonce)
3249 {
3250 	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_INFO_SIGNED_GET, dev_info_signed,
3251 					 sizeof(struct cpucp_dev_info_signed), nonce,
3252 					 HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3253 }
3254 
3255 int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
3256 						dma_addr_t buff, u32 *size)
3257 {
3258 	struct cpucp_packet pkt = {};
3259 	u64 result;
3260 	int rc = 0;
3261 
3262 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
3263 	pkt.addr = cpu_to_le64(buff);
3264 	pkt.data_max_size = cpu_to_le32(*size);
3265 	pkt.pkt_subidx = cpu_to_le32(sub_opcode);
3266 
3267 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
3268 						HL_CPUCP_INFO_TIMEOUT_USEC, &result);
3269 	if (rc)
3270 		dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
3271 	else
3272 		dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
3273 
3274 	*size = (u32)result;
3275 
3276 	return rc;
3277 }
3278