xref: /illumos-gate/usr/src/uts/common/io/scsi/adapters/smartpqi/smartpqi_init.c (revision 4091c9224059f5a872dee3ac113807fcc7195dea)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2023 Tintri by DDN, Inc. All rights reserved.
14  * Copyright 2021 Racktop Systems.
15  */
16 
17 /*
18  * This file contains the startup code to initialize the HBA for use
19  * with the PQI interface.
20  */
21 #include <smartpqi.h>
22 
23 #define	PQI_DEVICE_SIGNATURE			"PQI DREG"
24 #define	PQI_STATUS_IDLE				0x0
25 #define	PQI_DEVICE_STATE_ALL_REGISTERS_READY	0x2
26 
27 typedef struct _func_list_ {
28 	char		*func_name;
29 	boolean_t	(*func)(pqi_state_t *);
30 } func_list_t;
31 
32 static boolean_t pqi_reset_prep(pqi_state_t *);
33 static boolean_t pqi_ctlr_ready(pqi_state_t *);
34 static boolean_t revert_to_sis(pqi_state_t *);
35 static boolean_t pqi_calculate_io_resources(pqi_state_t *);
36 static boolean_t pqi_check_alloc(pqi_state_t *);
37 static boolean_t pqi_wait_for_mode_ready(pqi_state_t *);
38 static boolean_t save_ctrl_mode_pqi(pqi_state_t *);
39 static boolean_t pqi_process_config_table(pqi_state_t *);
40 static boolean_t pqi_alloc_admin_queue(pqi_state_t *);
41 static boolean_t pqi_create_admin_queues(pqi_state_t *);
42 static boolean_t pqi_report_device_capability(pqi_state_t *);
43 static boolean_t pqi_valid_device_capability(pqi_state_t *);
44 static boolean_t pqi_calculate_queue_resources(pqi_state_t *);
45 static boolean_t pqi_alloc_io_resource(pqi_state_t *);
46 static boolean_t pqi_alloc_operation_queues(pqi_state_t *);
47 static boolean_t pqi_init_operational_queues(pqi_state_t *);
48 static boolean_t pqi_init_operational_locks(pqi_state_t *);
49 static boolean_t pqi_create_queues(pqi_state_t *);
50 static boolean_t pqi_change_irq_mode(pqi_state_t *);
51 static boolean_t pqi_start_heartbeat_timer(pqi_state_t *);
52 static boolean_t pqi_enable_events(pqi_state_t *);
53 static boolean_t pqi_get_hba_version(pqi_state_t *);
54 static boolean_t pqi_version_to_hba(pqi_state_t *);
55 static boolean_t pqi_schedule_update_time_worker(pqi_state_t *);
56 static boolean_t pqi_scan_scsi_devices(pqi_state_t *);
57 
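/*
 * Table-driven initialization: each entry pairs a step with a
 * human-readable name.  pqi_prep_full() walks startup_funcs and
 * pqi_reset_ctl() walks reset_funcs, in order, stopping at the first
 * step that returns B_FALSE and logging the failing step by name.
 */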
58 func_list_t startup_funcs[] =
59 {
60 	{ "sis_wait_for_ctrl_ready", sis_wait_for_ctrl_ready },
61 	{ "sis_get_ctrl_props", sis_get_ctrl_props },
62 	{ "sis_get_pqi_capabilities", sis_get_pqi_capabilities },
63 	{ "pqi_calculate_io_resources", pqi_calculate_io_resources },
64 	{ "pqi_check_alloc", pqi_check_alloc },
65 	{ "sis_init_base_struct_addr", sis_init_base_struct_addr },
66 	{ "pqi_wait_for_mode_ready", pqi_wait_for_mode_ready },
67 	{ "save_ctrl_mode_pqi", save_ctrl_mode_pqi },
68 	{ "pqi_process_config_table", pqi_process_config_table },
69 	{ "pqi_alloc_admin_queue", pqi_alloc_admin_queue },
70 	{ "pqi_create_admin_queues", pqi_create_admin_queues },
71 	{ "pqi_report_device_capability", pqi_report_device_capability },
72 	{ "pqi_valid_device_capability", pqi_valid_device_capability },
73 	{ "pqi_calculate_queue_resources", pqi_calculate_queue_resources },
74 	{ "pqi_alloc_io_resource", pqi_alloc_io_resource },
75 	{ "pqi_alloc_operation_queues", pqi_alloc_operation_queues },
76 	{ "pqi_init_operational_queues", pqi_init_operational_queues },
77 	{ "pqi_init_operational_locks", pqi_init_operational_locks },
78 	{ "pqi_create_queues", pqi_create_queues },
79 	{ "pqi_change_irq_mode", pqi_change_irq_mode },
80 	{ "pqi_start_heartbeat_timer", pqi_start_heartbeat_timer },
81 	{ "pqi_enable_events", pqi_enable_events },
82 	{ "pqi_get_hba_version", pqi_get_hba_version },
83 	{ "pqi_version_to_hba", pqi_version_to_hba },
84 	{ "pqi_schedule_update_time_worker", pqi_schedule_update_time_worker },
85 	{ "pqi_scan_scsi_devices", pqi_scan_scsi_devices },
86 	{ NULL, NULL }
87 };
88 
89 func_list_t reset_funcs[] =
90 {
91 	{ "pqi_reset_prep", pqi_reset_prep },
92 	{ "revert_to_sis", revert_to_sis },
93 	{ "pqi_check_firmware", pqi_check_firmware },
94 	{ "sis_wait_for_ctrl_ready", sis_wait_for_ctrl_ready },
95 	{ "sis_get_ctrl_props", sis_get_ctrl_props },
96 	{ "sis_get_pqi_capabilities", sis_get_pqi_capabilities },
97 	{ "pqi_calculate_io_resources", pqi_calculate_io_resources },
98 	{ "pqi_check_alloc", pqi_check_alloc },
99 	{ "sis_init_base_struct_addr", sis_init_base_struct_addr },
100 	{ "pqi_wait_for_mode_ready", pqi_wait_for_mode_ready },
101 	{ "save_ctrl_mode_pqi", save_ctrl_mode_pqi },
102 	{ "pqi_process_config_table", pqi_process_config_table },
103 	{ "pqi_alloc_admin_queue", pqi_alloc_admin_queue },
104 	{ "pqi_create_admin_queues", pqi_create_admin_queues },
105 	{ "pqi_report_device_capability", pqi_report_device_capability },
106 	{ "pqi_valid_device_capability", pqi_valid_device_capability },
107 	{ "pqi_calculate_queue_resources", pqi_calculate_queue_resources },
108 	{ "pqi_alloc_io_resource", pqi_alloc_io_resource },
109 	{ "pqi_alloc_operation_queues", pqi_alloc_operation_queues },
110 	{ "pqi_init_operational_queues", pqi_init_operational_queues },
111 	{ "pqi_create_queues", pqi_create_queues },
112 	{ "pqi_change_irq_mode", pqi_change_irq_mode },
113 	{ "pqi_ctlr_ready", pqi_ctlr_ready },
114 	{ "pqi_start_heartbeat_timer", pqi_start_heartbeat_timer },
115 	{ "pqi_enable_events", pqi_enable_events },
116 	{ "pqi_get_hba_version", pqi_get_hba_version },
117 	{ "pqi_version_to_hba", pqi_version_to_hba },
118 	{ "pqi_schedule_update_time_worker", pqi_schedule_update_time_worker },
119 	{ NULL, NULL }
120 };
121 
122 /* ---- Forward declarations for utility functions ---- */
123 static void bcopy_fromregs(pqi_state_t *s, uint8_t *iomem, uint8_t *dst,
124     uint32_t len);
125 static boolean_t submit_admin_rqst_sync(pqi_state_t *s,
126     pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp);
127 static boolean_t create_event_queue(pqi_state_t *s);
128 static boolean_t create_queue_group(pqi_state_t *s, int idx);
129 static boolean_t submit_raid_rqst_sync(pqi_state_t *s, pqi_iu_header_t *rqst,
130     pqi_raid_error_info_t e_info);
131 static boolean_t identify_controller(pqi_state_t *s,
132     bmic_identify_controller_t *ident);
133 static boolean_t write_host_wellness(pqi_state_t *s, void *buf, size_t len);
134 static boolean_t get_device_list(pqi_state_t *s,
135     report_phys_lun_extended_t **pl, size_t *plen,
136     report_log_lun_extended_t **ll, size_t *llen);
137 static boolean_t build_raid_path_request(pqi_raid_path_request_t *rqst, int cmd,
138     caddr_t lun, uint32_t len, int vpd_page);
139 static boolean_t identify_physical_device(pqi_state_t *s, pqi_device_t *devp,
140     bmic_identify_physical_device_t *buf);
141 static pqi_device_t *create_phys_dev(pqi_state_t *s,
142     report_phys_lun_extended_entry_t *e);
143 static pqi_device_t *create_logical_dev(pqi_state_t *s,
144     report_log_lun_extended_entry_t *e);
145 static boolean_t is_new_dev(pqi_state_t *s, pqi_device_t *new_dev);
146 static boolean_t revert_to_sis(pqi_state_t *s);
147 static void save_ctrl_mode(pqi_state_t *s, int mode);
148 static boolean_t scsi_common(pqi_state_t *s, pqi_raid_path_request_t *rqst,
149     caddr_t buf, int len);
150 static void update_time(void *v);
151 
152 static int reset_devices = 1;
153 
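/*
 * Upper bound on the number of outstanding commands.  When non-zero and
 * smaller than what the controller reports, pqi_check_alloc() clamps
 * s_max_io_slots to this value (tunable, e.g. via /etc/system).
 */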
154 int pqi_max_io_slots = PQI_MAX_IO_SLOTS;
155 
156 static boolean_t
157 pqi_reset_prep(pqi_state_t *s)
158 {
159 	s->s_intr_ready = B_FALSE;
160 	(void) untimeout(s->s_time_of_day);
161 	(void) untimeout(s->s_watchdog);
162 	pqi_free_single(s, s->s_error_dma);
163 	s->s_error_dma = NULL;
164 
165 	pqi_free_single(s, s->s_adminq_dma);
166 	s->s_adminq_dma = NULL;
167 
168 	mutex_enter(&s->s_io_mutex);
169 	pqi_free_io_resource(s);
170 	mutex_exit(&s->s_io_mutex);
171 	return (B_TRUE);
172 }
173 
174 static boolean_t
175 pqi_ctlr_ready(pqi_state_t *s)
176 {
177 	s->s_offline = B_FALSE;
178 	return (B_TRUE);
179 }
180 
181 boolean_t
182 pqi_check_firmware(pqi_state_t *s)
183 {
184 	uint32_t	status;
185 
186 	status = G32(s, sis_firmware_status);
187 	if (status & SIS_CTRL_KERNEL_PANIC)
188 		return (B_FALSE);
189 
190 	if (sis_read_scratch(s) == SIS_MODE)
191 		return (B_TRUE);
192 
193 	if (status & SIS_CTRL_KERNEL_UP) {
194 		sis_write_scratch(s, SIS_MODE);
195 		return (B_TRUE);
196 	} else {
197 		return (revert_to_sis(s));
198 	}
199 }
200 
201 boolean_t
202 pqi_prep_full(pqi_state_t *s)
203 {
204 	func_list_t	*f;
205 
206 	for (f = startup_funcs; f->func_name != NULL; f++)
207 		if (f->func(s) == B_FALSE) {
208 			cmn_err(CE_WARN, "Init failed on %s", f->func_name);
209 			return (B_FALSE);
210 		}
211 
212 	return (B_TRUE);
213 }
214 
215 boolean_t
216 pqi_reset_ctl(pqi_state_t *s)
217 {
218 	func_list_t	*f;
219 
220 	for (f = reset_funcs; f->func_name != NULL; f++)
221 		if (f->func(s) == B_FALSE) {
222 			cmn_err(CE_WARN, "Reset failed on %s", f->func_name);
223 			return (B_FALSE);
224 		}
225 
226 	return (B_TRUE);
227 }
228 /*
229  * []----------------------------------------------------------[]
230  * | Startup functions called in sequence to initialize HBA.	|
231  * []----------------------------------------------------------[]
232  */
233 
234 static boolean_t
235 pqi_calculate_io_resources(pqi_state_t *s)
236 {
237 	uint32_t	max_xfer_size;
238 	uint32_t	max_sg_entries;
239 
240 	s->s_max_io_slots = s->s_max_outstanding_requests;
241 
242 	max_xfer_size = min(s->s_max_xfer_size, PQI_MAX_TRANSFER_SIZE);
243 
244 	/* ---- add 1 when buf is not page aligned ---- */
245 	max_sg_entries = max_xfer_size / PAGESIZE + 1;
246 	max_sg_entries = min(max_sg_entries, s->s_max_sg_entries);
247 	max_xfer_size = (max_sg_entries - 1) * PAGESIZE;
248 
249 	s->s_sg_chain_buf_length = (max_sg_entries * sizeof (pqi_sg_entry_t)) +
250 	    PQI_EXTRA_SGL_MEMORY;
251 
252 	s->s_max_sectors = max_xfer_size / 512;
253 
254 	return (B_TRUE);
255 }
256 
257 static boolean_t
258 pqi_check_alloc(pqi_state_t *s)
259 {
260 	/*
261 	 * Note that we need to pass a generation count as part of an I/O
262 	 * request id.  The id is limited to 16 bits and we reserve 4 bits
263 	 * for the generation number.  This means we must limit
264 	 * s_max_io_slots to at most 12 bits worth of slot indexes (4096).
265 	 */
266 	if (pqi_max_io_slots != 0 && pqi_max_io_slots < s->s_max_io_slots) {
267 		s->s_max_io_slots = pqi_max_io_slots;
268 	}
269 
270 	s->s_error_dma = pqi_alloc_single(s, (s->s_max_io_slots *
271 	    PQI_ERROR_BUFFER_ELEMENT_LENGTH) + SIS_BASE_STRUCT_ALIGNMENT);
272 	if (s->s_error_dma == NULL)
273 		return (B_FALSE);
274 
275 	return (B_TRUE);
276 }
277 
278 #define	WAIT_FOR_FIRMWARE_IN_MSECS (5 * MILLISEC)
279 
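/*
 * Poll the PQI signature, function-and-status, and device-status registers
 * until the firmware reports all registers ready.  Each stage polls once
 * per millisecond and gives up after WAIT_FOR_FIRMWARE_IN_MSECS tries.
 */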
280 static boolean_t
281 pqi_wait_for_mode_ready(pqi_state_t *s)
282 {
283 	uint64_t	signature;
284 	int32_t		count = WAIT_FOR_FIRMWARE_IN_MSECS;
285 
286 	for (;;) {
287 		signature = G64(s, pqi_registers.signature);
288 		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
289 		    sizeof (signature)) == 0)
290 			break;
291 		if (count-- == 0)
292 			return (B_FALSE);
293 		drv_usecwait(MICROSEC / MILLISEC);
294 	}
295 
296 	count = WAIT_FOR_FIRMWARE_IN_MSECS;
297 	for (;;) {
298 		if (G64(s, pqi_registers.function_and_status_code) ==
299 		    PQI_STATUS_IDLE)
300 			break;
301 		if (count-- == 0)
302 			return (B_FALSE);
303 		drv_usecwait(MICROSEC / MILLISEC);
304 	}
305 
306 	count = WAIT_FOR_FIRMWARE_IN_MSECS;
307 	for (;;) {
308 		if (G32(s, pqi_registers.device_status) ==
309 		    PQI_DEVICE_STATE_ALL_REGISTERS_READY)
310 			break;
311 		if (count-- == 0)
312 			return (B_FALSE);
313 		drv_usecwait(MICROSEC / MILLISEC);
314 	}
315 
316 	return (B_TRUE);
317 }
318 
319 static boolean_t
320 save_ctrl_mode_pqi(pqi_state_t *s)
321 {
322 	save_ctrl_mode(s, PQI_MODE);
323 	return (B_TRUE);
324 }
325 
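/*
 * Copy the PQI configuration table out of BAR memory and walk its sections.
 * Only the heartbeat section is used here: a pointer to the controller's
 * heartbeat counter in register space is saved in s_heartbeat_counter.
 */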
326 static boolean_t
327 pqi_process_config_table(pqi_state_t *s)
328 {
329 	pqi_config_table_t			*c_table;
330 	pqi_config_table_section_header_t	*section;
331 	uint32_t				section_offset;
332 
333 	c_table = kmem_zalloc(s->s_config_table_len, KM_SLEEP);
334 	bcopy_fromregs(s, (uint8_t *)s->s_reg + s->s_config_table_offset,
335 	    (uint8_t *)c_table, s->s_config_table_len);
336 
337 	section_offset = c_table->first_section_offset;
338 	while (section_offset) {
339 		section = (pqi_config_table_section_header_t *)
340 		    ((caddr_t)c_table + section_offset);
341 		switch (section->section_id) {
342 		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
343 			/* LINTED E_BAD_PTR_CAST_ALIGN */
344 			s->s_heartbeat_counter = (uint32_t *)
345 			    ((caddr_t)s->s_reg +
346 			    s->s_config_table_offset + section_offset +
347 			    offsetof(struct pqi_config_table_heartbeat,
348 			    heartbeat_counter));
349 			break;
350 		}
351 		section_offset = section->next_section_offset;
352 	}
353 	kmem_free(c_table, s->s_config_table_len);
354 	return (B_TRUE);
355 }
356 
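/*
 * Carve a single DMA allocation into the aligned admin queue element arrays
 * and index words described by pqi_admin_queues_aligned_t, and record the
 * corresponding bus addresses that pqi_create_admin_queues() hands to the
 * controller.
 */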
357 static boolean_t
358 pqi_alloc_admin_queue(pqi_state_t *s)
359 {
360 	pqi_admin_queues_t		*aq;
361 	pqi_admin_queues_aligned_t	*aq_aligned;
362 	int				len;
363 
364 	len = sizeof (*aq_aligned) + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
365 	if ((s->s_adminq_dma = pqi_alloc_single(s, len)) == NULL)
366 		return (B_FALSE);
367 	(void) memset(s->s_adminq_dma->alloc_memory, 0,
368 	    s->s_adminq_dma->len_to_alloc);
369 	(void) ddi_dma_sync(s->s_adminq_dma->handle, 0,
370 	    s->s_adminq_dma->len_to_alloc, DDI_DMA_SYNC_FORDEV);
371 
372 	aq = &s->s_admin_queues;
373 	aq_aligned = PQIALIGN_TYPED(s->s_adminq_dma->alloc_memory,
374 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, pqi_admin_queues_aligned_t *);
375 	aq->iq_element_array = (caddr_t)&aq_aligned->iq_element_array;
376 	aq->oq_element_array = (caddr_t)&aq_aligned->oq_element_array;
377 	aq->iq_ci = &aq_aligned->iq_ci;
378 	aq->oq_pi = &aq_aligned->oq_pi;
379 
380 	aq->iq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
381 	    ((uintptr_t)aq->iq_element_array -
382 	    (uintptr_t)s->s_adminq_dma->alloc_memory);
383 	aq->oq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
384 	    ((uintptr_t)aq->oq_element_array -
385 	    (uintptr_t)s->s_adminq_dma->alloc_memory);
386 
387 	aq->iq_ci_bus_addr = s->s_adminq_dma->dma_addr +
388 	    ((uintptr_t)aq->iq_ci - (uintptr_t)s->s_adminq_dma->alloc_memory);
389 	aq->oq_pi_bus_addr = s->s_adminq_dma->dma_addr +
390 	    ((uintptr_t)aq->oq_pi - (uintptr_t)s->s_adminq_dma->alloc_memory);
391 	return (B_TRUE);
392 }
393 
394 static boolean_t
395 pqi_create_admin_queues(pqi_state_t *s)
396 {
397 	pqi_admin_queues_t *aq = &s->s_admin_queues;
398 	int			val;
399 	int			status;
400 	int			countdown = 1000;
401 
402 
403 	aq->iq_pi_copy = 0;
404 	aq->oq_ci_copy = 0;
405 
406 	S64(s, pqi_registers.admin_iq_element_array_addr,
407 	    aq->iq_element_array_bus_addr);
408 	S64(s, pqi_registers.admin_oq_element_array_addr,
409 	    aq->oq_element_array_bus_addr);
410 	S64(s, pqi_registers.admin_iq_ci_addr,
411 	    aq->iq_ci_bus_addr);
412 	S64(s, pqi_registers.admin_oq_pi_addr,
413 	    aq->oq_pi_bus_addr);
414 
415 	val = PQI_ADMIN_IQ_NUM_ELEMENTS | PQI_ADMIN_OQ_NUM_ELEMENTS << 8 |
416 	    aq->int_msg_num << 16;
417 	S32(s, pqi_registers.admin_queue_params, val);
418 	S64(s, pqi_registers.function_and_status_code,
419 	    PQI_CREATE_ADMIN_QUEUE_PAIR);
420 
421 	while (countdown-- > 0) {
422 		status = G64(s, pqi_registers.function_and_status_code);
423 		if (status == PQI_STATUS_IDLE)
424 			break;
425 		drv_usecwait(1000);	/* ---- Wait 1ms ---- */
426 	}
427 	if (status != PQI_STATUS_IDLE)
428 		return (B_FALSE);
429 
430 	/*
431 	 * The offset registers are not initialized to the correct
432 	 * offsets until *after* the create admin queue pair command
433 	 * completes successfully.
434 	 */
435 	aq->iq_pi = (void *)(intptr_t)((intptr_t)s->s_reg +
436 	    PQI_DEVICE_REGISTERS_OFFSET +
437 	    G64(s, pqi_registers.admin_iq_pi_offset));
438 	ASSERT((G64(s, pqi_registers.admin_iq_pi_offset) +
439 	    PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
440 
441 	aq->oq_ci = (void *)(intptr_t)((intptr_t)s->s_reg +
442 	    PQI_DEVICE_REGISTERS_OFFSET +
443 	    G64(s, pqi_registers.admin_oq_ci_offset));
444 	ASSERT((G64(s, pqi_registers.admin_oq_ci_offset) +
445 	    PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
446 
447 	return (B_TRUE);
448 }
449 
450 static boolean_t
451 pqi_report_device_capability(pqi_state_t *s)
452 {
453 	pqi_general_admin_request_t	rqst;
454 	pqi_general_admin_response_t	rsp;
455 	pqi_device_capability_t		*cap;
456 	pqi_iu_layer_descriptor_t	*iu_layer;
457 	pqi_dma_overhead_t		*dma;
458 	boolean_t			rval;
459 	pqi_sg_entry_t			*sg;
460 
461 	(void) memset(&rqst, 0, sizeof (rqst));
462 
463 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
464 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
465 	rqst.function_code =
466 	    PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
467 	rqst.data.report_device_capability.buffer_length =
468 	    sizeof (*cap);
469 
470 	if ((dma = pqi_alloc_single(s, sizeof (*cap))) == NULL)
471 		return (B_FALSE);
472 
473 	sg = &rqst.data.report_device_capability.sg_descriptor;
474 	sg->sg_addr = dma->dma_addr;
475 	sg->sg_len = dma->len_to_alloc;
476 	sg->sg_flags = CISS_SG_LAST;
477 
478 	rval = submit_admin_rqst_sync(s, &rqst, &rsp);
479 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
480 	cap = (pqi_device_capability_t *)dma->alloc_memory;
481 
482 	s->s_max_inbound_queues = cap->max_inbound_queues;
483 	s->s_max_elements_per_iq = cap->max_elements_per_iq;
484 	s->s_max_iq_element_length = cap->max_iq_element_length * 16;
485 	s->s_max_outbound_queues = cap->max_outbound_queues;
486 	s->s_max_elements_per_oq = cap->max_elements_per_oq;
487 	s->s_max_oq_element_length = cap->max_oq_element_length * 16;
488 
489 	iu_layer = &cap->iu_layer_descriptors[PQI_PROTOCOL_SOP];
490 	s->s_max_inbound_iu_length_per_firmware =
491 	    iu_layer->max_inbound_iu_length;
492 	s->s_inbound_spanning_supported = iu_layer->inbound_spanning_supported;
493 	s->s_outbound_spanning_supported =
494 	    iu_layer->outbound_spanning_supported;
495 
496 	pqi_free_single(s, dma);
497 	return (rval);
498 }
499 
500 static boolean_t
501 pqi_valid_device_capability(pqi_state_t *s)
502 {
503 	if (s->s_max_iq_element_length < PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
504 		return (B_FALSE);
505 	if (s->s_max_oq_element_length < PQI_OPERATIONAL_OQ_ELEMENT_LENGTH)
506 		return (B_FALSE);
507 	if (s->s_max_inbound_iu_length_per_firmware <
508 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
509 		return (B_FALSE);
510 	/* ---- Controller doesn't support spanning but we need it ---- */
511 	if (!s->s_inbound_spanning_supported)
512 		return (B_FALSE);
513 	/* ---- Controller wants outbound spanning, the driver doesn't ---- */
514 	if (s->s_outbound_spanning_supported)
515 		return (B_FALSE);
516 
517 	return (B_TRUE);
518 }
519 
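/*
 * Size the operational queues from the reported capabilities: one queue
 * group per interrupt vector (bounded by ncpus, the controller limits and
 * PQI_MAX_QUEUE_GROUPS, or a single group when reset_devices is set),
 * inbound element counts derived from the largest spanned IU, and roughly
 * twice as many outbound elements as inbound ones.
 */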
520 static boolean_t
521 pqi_calculate_queue_resources(pqi_state_t *s)
522 {
523 	int	max_queue_groups;
524 	int	num_queue_groups;
525 	int	num_elements_per_iq;
526 	int	num_elements_per_oq;
527 
528 	if (reset_devices) {
529 		num_queue_groups = 1;
530 	} else {
531 		max_queue_groups = min(s->s_max_inbound_queues / 2,
532 		    s->s_max_outbound_queues - 1);
533 		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
534 
535 		num_queue_groups = min(ncpus, s->s_intr_cnt);
536 		num_queue_groups = min(num_queue_groups, max_queue_groups);
537 	}
538 	s->s_num_queue_groups = num_queue_groups;
539 
540 	s->s_max_inbound_iu_length =
541 	    (s->s_max_inbound_iu_length_per_firmware /
542 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
543 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
544 
545 	num_elements_per_iq = s->s_max_inbound_iu_length /
546 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
547 	/* ---- add one because one element in each queue is unusable ---- */
548 	num_elements_per_iq++;
549 
550 	num_elements_per_iq = min(num_elements_per_iq,
551 	    s->s_max_elements_per_iq);
552 
553 	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
554 	num_elements_per_oq = min(num_elements_per_oq,
555 	    s->s_max_elements_per_oq);
556 
557 	s->s_num_elements_per_iq = num_elements_per_iq;
558 	s->s_num_elements_per_oq = num_elements_per_oq;
559 
560 	s->s_max_sg_per_iu = ((s->s_max_inbound_iu_length -
561 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
562 	    sizeof (struct pqi_sg_entry)) +
563 	    PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
564 	return (B_TRUE);
565 }
566 
567 static boolean_t
568 pqi_alloc_io_resource(pqi_state_t *s)
569 {
570 	pqi_io_request_t	*io;
571 	size_t			sg_chain_len;
572 	int			i;
573 
574 	s->s_io_rqst_pool = kmem_zalloc(s->s_max_io_slots * sizeof (*io),
575 	    KM_SLEEP);
576 
577 	sg_chain_len = s->s_sg_chain_buf_length;
578 	io = s->s_io_rqst_pool;
579 	for (i = 0; i < s->s_max_io_slots; i++) {
580 		io->io_iu = kmem_zalloc(s->s_max_inbound_iu_length, KM_SLEEP);
581 
582 		/*
583 		 * TODO: Don't allocate dma space here. Move this to
584 		 * init_pkt when it's clear the data being transferred
585 		 * will not fit in the four SG slots provided by each
586 		 * command.
587 		 */
588 		io->io_sg_chain_dma = pqi_alloc_single(s, sg_chain_len);
589 		if (io->io_sg_chain_dma == NULL)
590 			goto error_out;
591 
592 		mutex_init(&io->io_lock, NULL, MUTEX_DRIVER, NULL);
593 		io->io_gen = 1;
594 		list_link_init(&io->io_list_node);
595 		io->io_index = (uint16_t)i;
596 
597 		io->io_softc = s;
598 		io++;
599 	}
600 
601 	return (B_TRUE);
602 
603 error_out:
604 	for (i = 0; i < s->s_max_io_slots; i++) {
605 		if (io->io_iu != NULL) {
606 			kmem_free(io->io_iu, s->s_max_inbound_iu_length);
607 			io->io_iu = NULL;
608 		}
609 		if (io->io_sg_chain_dma != NULL) {
610 			pqi_free_single(s, io->io_sg_chain_dma);
611 			io->io_sg_chain_dma = NULL;
612 		}
613 	}
614 	kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
615 	s->s_io_rqst_pool = NULL;
616 
617 	return (B_FALSE);
618 }
619 
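/*
 * Lay out all inbound/outbound element arrays and queue indexes within one
 * DMA allocation.  The first pass walks a NULL pointer through the required
 * alignments purely to compute the total size; once the buffer is allocated
 * the same walk is repeated to assign the actual addresses.
 */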
620 static boolean_t
621 pqi_alloc_operation_queues(pqi_state_t *s)
622 {
623 	uint32_t	niq = s->s_num_queue_groups * 2;
624 	uint32_t	noq = s->s_num_queue_groups;
625 	uint32_t	queue_idx = (s->s_num_queue_groups * 3) + 1;
626 	uint32_t	i;
627 	size_t		array_len_iq;
628 	size_t		array_len_oq;
629 	size_t		alloc_len;
630 	caddr_t		aligned_pointer = NULL;
631 	pqi_queue_group_t	*qg;
632 
633 	array_len_iq = PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
634 	    s->s_num_elements_per_iq;
635 	array_len_oq = PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
636 	    s->s_num_elements_per_oq;
637 
638 	for (i = 0; i < niq; i++) {
639 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
640 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
641 		aligned_pointer += array_len_iq;
642 	}
643 
644 	for (i = 0; i < noq; i++) {
645 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
646 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
647 		aligned_pointer += array_len_oq;
648 	}
649 
650 	aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
651 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
652 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
653 	    PQI_EVENT_OQ_ELEMENT_LENGTH;
654 
655 	for (i = 0; i < queue_idx; i++) {
656 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
657 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
658 		aligned_pointer += sizeof (pqi_index_t);
659 	}
660 
661 	alloc_len = (size_t)aligned_pointer +
662 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT + PQI_EXTRA_SGL_MEMORY;
663 	if ((s->s_queue_dma = pqi_alloc_single(s, alloc_len)) == NULL)
664 		return (B_FALSE);
665 
666 	aligned_pointer = PQIALIGN_TYPED(s->s_queue_dma->alloc_memory,
667 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
668 	for (i = 0; i < s->s_num_queue_groups; i++) {
669 		qg = &s->s_queue_groups[i];
670 
671 		qg->iq_pi_copy[0] = 0;
672 		qg->iq_pi_copy[1] = 0;
673 		qg->oq_ci_copy = 0;
674 		qg->iq_element_array[RAID_PATH] = aligned_pointer;
675 		qg->iq_element_array_bus_addr[RAID_PATH] =
676 		    s->s_queue_dma->dma_addr +
677 		    ((uintptr_t)aligned_pointer -
678 		    (uintptr_t)s->s_queue_dma->alloc_memory);
679 
680 		aligned_pointer += array_len_iq;
681 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
682 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
683 
684 		qg->iq_element_array[AIO_PATH] = aligned_pointer;
685 		qg->iq_element_array_bus_addr[AIO_PATH] =
686 		    s->s_queue_dma->dma_addr +
687 		    ((uintptr_t)aligned_pointer -
688 		    (uintptr_t)s->s_queue_dma->alloc_memory);
689 
690 		aligned_pointer += array_len_iq;
691 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
692 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
693 	}
694 	for (i = 0; i < s->s_num_queue_groups; i++) {
695 		qg = &s->s_queue_groups[i];
696 
697 		qg->oq_element_array = aligned_pointer;
698 		qg->oq_element_array_bus_addr =
699 		    s->s_queue_dma->dma_addr +
700 		    ((uintptr_t)aligned_pointer -
701 		    (uintptr_t)s->s_queue_dma->alloc_memory);
702 
703 		aligned_pointer += array_len_oq;
704 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
705 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
706 	}
707 
708 	s->s_event_queue.oq_element_array = aligned_pointer;
709 	s->s_event_queue.oq_element_array_bus_addr =
710 	    s->s_queue_dma->dma_addr +
711 	    ((uintptr_t)aligned_pointer -
712 	    (uintptr_t)s->s_queue_dma->alloc_memory);
713 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
714 	    PQI_EVENT_OQ_ELEMENT_LENGTH;
715 
716 	aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
717 	    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
718 
719 	for (i = 0; i < s->s_num_queue_groups; i++) {
720 		qg = &s->s_queue_groups[i];
721 
722 		/* LINTED E_BAD_PTR_CAST_ALIGN */
723 		qg->iq_ci[RAID_PATH] = (pqi_index_t *)aligned_pointer;
724 		qg->iq_ci_bus_addr[RAID_PATH] =
725 		    s->s_queue_dma->dma_addr +
726 		    ((uintptr_t)aligned_pointer -
727 		    (uintptr_t)s->s_queue_dma->alloc_memory);
728 
729 		aligned_pointer += sizeof (pqi_index_t);
730 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
731 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
732 
733 		/* LINTED E_BAD_PTR_CAST_ALIGN */
734 		qg->iq_ci[AIO_PATH] = (pqi_index_t *)aligned_pointer;
735 		qg->iq_ci_bus_addr[AIO_PATH] =
736 		    s->s_queue_dma->dma_addr +
737 		    ((uintptr_t)aligned_pointer -
738 		    (uintptr_t)s->s_queue_dma->alloc_memory);
739 
740 		aligned_pointer += sizeof (pqi_index_t);
741 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
742 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
743 
744 		/* LINTED E_BAD_PTR_CAST_ALIGN */
745 		qg->oq_pi = (pqi_index_t *)aligned_pointer;
746 		qg->oq_pi_bus_addr =
747 		    s->s_queue_dma->dma_addr +
748 		    ((uintptr_t)aligned_pointer -
749 		    (uintptr_t)s->s_queue_dma->alloc_memory);
750 
751 		aligned_pointer += sizeof (pqi_index_t);
752 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
753 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
754 	}
755 
756 	/* LINTED E_BAD_PTR_CAST_ALIGN */
757 	s->s_event_queue.oq_pi = (pqi_index_t *)aligned_pointer;
758 	s->s_event_queue.oq_pi_bus_addr =
759 	    s->s_queue_dma->dma_addr +
760 	    ((uintptr_t)aligned_pointer -
761 	    (uintptr_t)s->s_queue_dma->alloc_memory);
762 	ASSERT((uintptr_t)aligned_pointer -
763 	    (uintptr_t)s->s_queue_dma->alloc_memory +
764 	    sizeof (pqi_index_t) <= s->s_queue_dma->len_to_alloc);
765 
766 	return (B_TRUE);
767 }
768 
769 static boolean_t
770 pqi_init_operational_queues(pqi_state_t *s)
771 {
772 	int		i;
773 	uint16_t	iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
774 	uint16_t	oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
775 
776 	for (i = 0; i < s->s_num_queue_groups; i++) {
777 		s->s_queue_groups[i].qg_softc = s;
778 	}
779 	s->s_event_queue.oq_id = oq_id++;
780 	for (i = 0; i < s->s_num_queue_groups; i++) {
781 		s->s_queue_groups[i].iq_id[RAID_PATH] = iq_id++;
782 		s->s_queue_groups[i].iq_id[AIO_PATH] = iq_id++;
783 		s->s_queue_groups[i].oq_id = oq_id++;
784 		s->s_queue_groups[i].qg_active = B_TRUE;
785 	}
786 	s->s_event_queue.int_msg_num = 0;
787 	for (i = 0; i < s->s_num_queue_groups; i++)
788 		s->s_queue_groups[i].int_msg_num = (uint16_t)i;
789 
790 	return (B_TRUE);
791 }
792 
793 static boolean_t
794 pqi_init_operational_locks(pqi_state_t *s)
795 {
796 	int	i;
797 
798 	for (i = 0; i < s->s_num_queue_groups; i++) {
799 		mutex_init(&s->s_queue_groups[i].submit_lock[0], NULL,
800 		    MUTEX_DRIVER, NULL);
801 		mutex_init(&s->s_queue_groups[i].submit_lock[1], NULL,
802 		    MUTEX_DRIVER, NULL);
803 		list_create(&s->s_queue_groups[i].request_list[RAID_PATH],
804 		    sizeof (pqi_io_request_t),
805 		    offsetof(struct pqi_io_request, io_list_node));
806 		list_create(&s->s_queue_groups[i].request_list[AIO_PATH],
807 		    sizeof (pqi_io_request_t),
808 		    offsetof(struct pqi_io_request, io_list_node));
809 	}
810 	return (B_TRUE);
811 }
812 
813 static boolean_t
814 pqi_create_queues(pqi_state_t *s)
815 {
816 	int	i;
817 
818 	if (create_event_queue(s) == B_FALSE)
819 		return (B_FALSE);
820 
821 	for (i = 0; i < s->s_num_queue_groups; i++) {
822 		if (create_queue_group(s, i) == B_FALSE) {
823 			return (B_FALSE);
824 		}
825 	}
826 
827 	return (B_TRUE);
828 }
829 
830 static boolean_t
831 pqi_change_irq_mode(pqi_state_t *s)
832 {
833 	/* ---- Device is already in MSI-X mode ---- */
834 	s->s_intr_ready = B_TRUE;
835 	return (B_TRUE);
836 }
837 
838 static boolean_t
839 pqi_start_heartbeat_timer(pqi_state_t *s)
840 {
841 	s->s_last_heartbeat_count = 0;
842 	s->s_last_intr_count = 0;
843 
844 	s->s_watchdog = timeout(pqi_watchdog, s, drv_usectohz(WATCHDOG));
845 	return (B_TRUE);
846 }
847 
848 #define	PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
849 	(offsetof(struct pqi_event_config, descriptors) + \
850 	(PQI_MAX_EVENT_DESCRIPTORS * sizeof (pqi_event_descriptor_t)))
851 
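/*
 * Read the current event configuration, point every event type the driver
 * supports at the event queue (and clear the rest), then write the modified
 * configuration back to the controller.
 */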
852 static boolean_t
853 pqi_enable_events(pqi_state_t *s)
854 {
855 	int			i;
856 	pqi_event_config_t	*ec;
857 	pqi_event_descriptor_t	*desc;
858 	pqi_general_mgmt_rqst_t	rqst;
859 	pqi_dma_overhead_t	*dma;
860 	pqi_sg_entry_t		*sg;
861 	boolean_t		rval = B_FALSE;
862 
863 	(void) memset(&rqst, 0, sizeof (rqst));
864 	dma = pqi_alloc_single(s, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH);
865 	if (dma == NULL)
866 		return (B_FALSE);
867 
868 	rqst.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
869 	rqst.header.iu_length = offsetof(struct pqi_general_management_request,
870 	    data.report_event_configuration.sg_descriptors[1]) -
871 	    PQI_REQUEST_HEADER_LENGTH;
872 	rqst.data.report_event_configuration.buffer_length =
873 	    PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
874 	sg = &rqst.data.report_event_configuration.sg_descriptors[0];
875 	sg->sg_addr = dma->dma_addr;
876 	sg->sg_len = dma->len_to_alloc;
877 	sg->sg_flags = CISS_SG_LAST;
878 
879 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
880 		goto error_out;
881 
882 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
883 	ec = (pqi_event_config_t *)dma->alloc_memory;
884 	for (i = 0; i < ec->num_event_descriptors; i++) {
885 		desc = &ec->descriptors[i];
886 		if (pqi_supported_event(desc->event_type) == B_TRUE)
887 			desc->oq_id = s->s_event_queue.oq_id;
888 		else
889 			desc->oq_id = 0;
890 	}
891 
892 	rqst.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
893 	rqst.header.iu_length = offsetof(struct pqi_general_management_request,
894 	    data.report_event_configuration.sg_descriptors[1]) -
895 	    PQI_REQUEST_HEADER_LENGTH;
896 	rqst.data.report_event_configuration.buffer_length =
897 	    PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
898 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORDEV);
899 
900 	rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
901 
902 error_out:
903 	pqi_free_single(s, dma);
904 	return (rval);
905 }
906 
907 /*
908  * pqi_get_hba_version -- find HBA's version number
909  */
910 static boolean_t
911 pqi_get_hba_version(pqi_state_t *s)
912 {
913 	bmic_identify_controller_t	*ident;
914 	boolean_t			rval = B_FALSE;
915 
916 	ident = kmem_zalloc(sizeof (*ident), KM_SLEEP);
917 	if (identify_controller(s, ident) == B_FALSE)
918 		goto out;
919 	(void) memcpy(s->s_firmware_version, ident->firmware_version,
920 	    sizeof (ident->firmware_version));
921 	s->s_firmware_version[sizeof (ident->firmware_version)] = '\0';
922 	(void) snprintf(s->s_firmware_version + strlen(s->s_firmware_version),
923 	    sizeof (s->s_firmware_version) - strlen(s->s_firmware_version),
924 	    "-%u", ident->firmware_build_number);
925 	rval = B_TRUE;
926 	cmn_err(CE_NOTE, "!smartpqi%d - firmware version: %s",
927 	    s->s_instance, s->s_firmware_version);
928 out:
929 	kmem_free(ident, sizeof (*ident));
930 	return (rval);
931 }
932 
933 /*
934  * pqi_version_to_hba -- send driver version to HBA
935  */
936 static boolean_t
937 pqi_version_to_hba(pqi_state_t *s)
938 {
939 	bmic_host_wellness_driver_version_t	*b;
940 	boolean_t				rval = B_FALSE;
941 
942 	b = kmem_zalloc(sizeof (*b), KM_SLEEP);
943 	b->start_tag[0] = '<';
944 	b->start_tag[1] = 'H';
945 	b->start_tag[2] = 'W';
946 	b->start_tag[3] = '>';
947 	b->drv_tag[0] = 'D';
948 	b->drv_tag[1] = 'V';
949 	b->driver_version_length = sizeof (b->driver_version);
950 	(void) snprintf(b->driver_version, sizeof (b->driver_version),
951 	    "Illumos 1.0");
952 	b->end_tag[0] = 'Z';
953 	b->end_tag[1] = 'Z';
954 
955 	rval = write_host_wellness(s, b, sizeof (*b));
956 	kmem_free(b, sizeof (*b));
957 
958 	return (rval);
959 }
960 
961 
962 static boolean_t
963 pqi_schedule_update_time_worker(pqi_state_t *s)
964 {
965 	update_time(s);
966 	return (B_TRUE);
967 }
968 
969 static boolean_t
970 pqi_scan_scsi_devices(pqi_state_t *s)
971 {
972 	report_phys_lun_extended_t	*phys_list	= NULL;
973 	report_log_lun_extended_t	*logical_list	= NULL;
974 	size_t plen;
975 	size_t llen;
976 	boolean_t			rval		= B_FALSE;
977 	int				num_phys	= 0;
978 	int				num_logical	= 0;
979 	int				i;
980 	pqi_device_t			*dev;
981 
982 	if (get_device_list(s, &phys_list, &plen,
983 	    &logical_list, &llen) == B_FALSE)
984 		goto error_out;
985 
986 	if (phys_list) {
987 		num_phys = ntohl(phys_list->header.list_length) /
988 		    sizeof (phys_list->lun_entries[0]);
989 	}
990 
991 	if (logical_list) {
992 		num_logical = ntohl(logical_list->header.list_length) /
993 		    sizeof (logical_list->lun_entries[0]);
994 	}
995 
996 	/*
997 	 * Need to look for devices that are no longer available. The call
998 	 * below to is_new_dev() will either mark the newly created device
999 	 * as having been scanned or, if is_new_dev() finds an existing
1000 	 * device in the list, mark that one as scanned instead.
1001 	 */
1002 	mutex_enter(&s->s_mutex);
1003 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1004 	    dev = list_next(&s->s_devnodes, dev)) {
1005 		dev->pd_scanned = 0;
1006 	}
1007 	mutex_exit(&s->s_mutex);
1008 
1009 	for (i = 0; i < (num_phys + num_logical); i++) {
1010 		if (i < num_phys) {
1011 			dev = create_phys_dev(s, &phys_list->lun_entries[i]);
1012 		} else {
1013 			dev = create_logical_dev(s,
1014 			    &logical_list->lun_entries[i - num_phys]);
1015 		}
1016 		if (dev != NULL) {
1017 			if (is_new_dev(s, dev) == B_TRUE) {
1018 				list_create(&dev->pd_cmd_list,
1019 				    sizeof (struct pqi_cmd),
1020 				    offsetof(struct pqi_cmd, pc_list));
1021 				mutex_init(&dev->pd_mutex, NULL, MUTEX_DRIVER,
1022 				    NULL);
1023 
1024 				mutex_enter(&s->s_mutex);
1025 				list_insert_tail(&s->s_devnodes, dev);
1026 				mutex_exit(&s->s_mutex);
1027 			} else {
1028 				ddi_devid_free_guid(dev->pd_guid);
1029 				kmem_free(dev, sizeof (*dev));
1030 			}
1031 		}
1032 	}
1033 
1034 	/*
1035 	 * Now look through the list for devices which have disappeared.
1036 	 * Mark them as being offline. During the call to config_one, which
1037 	 * will come next during a hotplug event, those devices will be
1038 	 * offlined to the SCSI subsystem.
1039 	 */
1040 	mutex_enter(&s->s_mutex);
1041 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1042 	    dev = list_next(&s->s_devnodes, dev)) {
1043 		if (dev->pd_scanned)
1044 			dev->pd_online = 1;
1045 		else
1046 			dev->pd_online = 0;
1047 	}
1048 
1049 	mutex_exit(&s->s_mutex);
1050 
1051 	rval = B_TRUE;
1052 
1053 error_out:
1054 	if (phys_list != NULL)
1055 		kmem_free(phys_list, plen);
1056 	if (logical_list != NULL)
1057 		kmem_free(logical_list, llen);
1058 	return (rval);
1059 }
1060 
1061 /*
1062  * []----------------------------------------------------------[]
1063  * | Entry points used by other functions found in other files	|
1064  * []----------------------------------------------------------[]
1065  */
1066 void
1067 pqi_rescan_devices(pqi_state_t *s)
1068 {
1069 	(void) pqi_scan_scsi_devices(s);
1070 }
1071 
1072 boolean_t
1073 pqi_scsi_inquiry(pqi_state_t *s, pqi_device_t *dev, int vpd,
1074     struct scsi_inquiry *inq, int len)
1075 {
1076 	pqi_raid_path_request_t rqst;
1077 
1078 	if (build_raid_path_request(&rqst, SCMD_INQUIRY,
1079 	    dev->pd_scsi3addr, len, vpd) == B_FALSE)
1080 		return (B_FALSE);
1081 
1082 	return (scsi_common(s, &rqst, (caddr_t)inq, len));
1083 }
1084 
1085 void
1086 pqi_free_io_resource(pqi_state_t *s)
1087 {
1088 	pqi_io_request_t	*io = s->s_io_rqst_pool;
1089 	int			i;
1090 
1091 	if (io == NULL)
1092 		return;
1093 
1094 	for (i = 0; i < s->s_max_io_slots; i++) {
1095 		if (io->io_iu == NULL)
1096 			break;
1097 		kmem_free(io->io_iu, s->s_max_inbound_iu_length);
1098 		io->io_iu = NULL;
1099 		pqi_free_single(s, io->io_sg_chain_dma);
1100 		io->io_sg_chain_dma = NULL;
1101 	}
1102 
1103 	kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
1104 	s->s_io_rqst_pool = NULL;
1105 }
1106 
1107 /*
1108  * []----------------------------------------------------------[]
1109  * | Utility functions for startup code.			|
1110  * []----------------------------------------------------------[]
1111  */
1112 
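/*
 * Common helper for synchronous RAID path requests that return data: stage
 * the transfer through a bounce buffer described by the first SG entry,
 * submit the request, and copy the result back to the caller's buffer.
 */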
1113 static boolean_t
1114 scsi_common(pqi_state_t *s, pqi_raid_path_request_t *rqst, caddr_t buf, int len)
1115 {
1116 	pqi_dma_overhead_t	*dma;
1117 	pqi_sg_entry_t		*sg;
1118 	boolean_t		rval = B_FALSE;
1119 
1120 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1121 		return (B_FALSE);
1122 
1123 	sg = &rqst->rp_sglist[0];
1124 	sg->sg_addr = dma->dma_addr;
1125 	sg->sg_len = dma->len_to_alloc;
1126 	sg->sg_flags = CISS_SG_LAST;
1127 
1128 	if (submit_raid_rqst_sync(s, &rqst->header, NULL) == B_FALSE)
1129 		goto out;
1130 
1131 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1132 	(void) memcpy(buf, dma->alloc_memory, len);
1133 	rval = B_TRUE;
1134 out:
1135 	pqi_free_single(s, dma);
1136 	return (rval);
1137 }
1138 
1139 static void
1140 bcopy_fromregs(pqi_state_t *s, uint8_t *iomem, uint8_t *dst, uint32_t len)
1141 {
1142 	int	i;
1143 
1144 	for (i = 0; i < len; i++) {
1145 		*dst++ = ddi_get8(s->s_datap, iomem + i);
1146 	}
1147 }
1148 
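/*
 * Copy a general admin request into the next free element of the admin
 * inbound queue, sync it for the device, and make it visible to the
 * controller by writing the updated producer index.
 */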
1149 static void
1150 submit_admin_request(pqi_state_t *s, pqi_general_admin_request_t *r)
1151 {
1152 	pqi_admin_queues_t	*aq;
1153 	pqi_index_t		iq_pi;
1154 	caddr_t			next_element;
1155 
1156 	aq = &s->s_admin_queues;
1157 	iq_pi = aq->iq_pi_copy;
1158 	next_element = aq->iq_element_array + (iq_pi *
1159 	    PQI_ADMIN_IQ_ELEMENT_LENGTH);
1160 	(void) memcpy(next_element, r, sizeof (*r));
1161 	(void) ddi_dma_sync(s->s_adminq_dma->handle,
1162 	    iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH, sizeof (*r),
1163 	    DDI_DMA_SYNC_FORDEV);
1164 	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
1165 	aq->iq_pi_copy = iq_pi;
1166 
1167 	ddi_put32(s->s_datap, aq->iq_pi, iq_pi);
1168 }
1169 
1170 static boolean_t
1171 poll_for_admin_response(pqi_state_t *s, pqi_general_admin_response_t *r)
1172 {
1173 	pqi_admin_queues_t	*aq;
1174 	pqi_index_t		oq_pi;
1175 	pqi_index_t		oq_ci;
1176 	int			countdown = 10 * MICROSEC;	/* 10 seconds */
1177 	int			pause_time = 10 * MILLISEC;	/* 10ms */
1178 
1179 	countdown /= pause_time;
1180 	aq = &s->s_admin_queues;
1181 	oq_ci = aq->oq_ci_copy;
1182 
1183 	while (--countdown) {
1184 		oq_pi = ddi_get32(s->s_adminq_dma->acc, aq->oq_pi);
1185 		if (oq_pi != oq_ci)
1186 			break;
1187 		drv_usecwait(pause_time);
1188 	}
1189 	if (countdown == 0)
1190 		return (B_FALSE);
1191 
1192 	(void) ddi_dma_sync(s->s_adminq_dma->handle,
1193 	    oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH, sizeof (*r),
1194 	    DDI_DMA_SYNC_FORCPU);
1195 	(void) memcpy(r, aq->oq_element_array +
1196 	    (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof (*r));
1197 
1198 	aq->oq_ci_copy = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
1199 	ddi_put32(s->s_datap, aq->oq_ci, aq->oq_ci_copy);
1200 
1201 	return (B_TRUE);
1202 }
1203 
1204 static boolean_t
1205 validate_admin_response(pqi_general_admin_response_t *r, uint8_t code)
1206 {
1207 	if (r->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
1208 		return (B_FALSE);
1209 
1210 	if (r->header.iu_length != PQI_GENERAL_ADMIN_IU_LENGTH)
1211 		return (B_FALSE);
1212 
1213 	if (r->function_code != code)
1214 		return (B_FALSE);
1215 
1216 	if (r->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
1217 		return (B_FALSE);
1218 
1219 	return (B_TRUE);
1220 }
1221 
1222 static boolean_t
1223 submit_admin_rqst_sync(pqi_state_t *s,
1224     pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp)
1225 {
1226 	boolean_t	rval;
1227 
1228 	submit_admin_request(s, rqst);
1229 	rval = poll_for_admin_response(s, rsp);
1230 	if (rval == B_TRUE) {
1231 		rval = validate_admin_response(rsp, rqst->function_code);
1232 		if (rval == B_FALSE) {
1233 			pqi_show_dev_state(s);
1234 		}
1235 	}
1236 	return (rval);
1237 }
1238 
1239 static boolean_t
1240 create_event_queue(pqi_state_t *s)
1241 {
1242 	pqi_event_queue_t		*eq;
1243 	pqi_general_admin_request_t	request;
1244 	pqi_general_admin_response_t	response;
1245 
1246 	eq = &s->s_event_queue;
1247 
1248 	/*
1249 	 * Create OQ (Outbound Queue - device to host queue) to dedicate
1250 	 * to events.
1251 	 */
1252 	(void) memset(&request, 0, sizeof (request));
1253 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1254 	request.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1255 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1256 	request.data.create_operational_oq.queue_id = eq->oq_id;
1257 	request.data.create_operational_oq.element_array_addr =
1258 	    eq->oq_element_array_bus_addr;
1259 	request.data.create_operational_oq.pi_addr = eq->oq_pi_bus_addr;
1260 	request.data.create_operational_oq.num_elements =
1261 	    PQI_NUM_EVENT_QUEUE_ELEMENTS;
1262 	request.data.create_operational_oq.element_length =
1263 	    PQI_EVENT_OQ_ELEMENT_LENGTH / 16;
1264 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1265 	request.data.create_operational_oq.int_msg_num = eq->int_msg_num;
1266 
1267 	if (submit_admin_rqst_sync(s, &request, &response) == B_FALSE)
1268 		return (B_FALSE);
1269 
1270 	eq->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1271 	    PQI_DEVICE_REGISTERS_OFFSET +
1272 	    response.data.create_operational_oq.oq_ci_offset);
1273 
1274 	return (B_TRUE);
1275 }
1276 
1277 static boolean_t
1278 create_queue_group(pqi_state_t *s, int idx)
1279 {
1280 	pqi_queue_group_t		*qg;
1281 	pqi_general_admin_request_t	rqst;
1282 	pqi_general_admin_response_t	rsp;
1283 
1284 	qg = &s->s_queue_groups[idx];
1285 
1286 	/* ---- Create inbound queue for RAID path (host to device) ---- */
1287 	(void) memset(&rqst, 0, sizeof (rqst));
1288 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1289 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1290 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1291 	rqst.data.create_operational_iq.queue_id = qg->iq_id[RAID_PATH];
1292 	rqst.data.create_operational_iq.element_array_addr =
1293 	    qg->iq_element_array_bus_addr[RAID_PATH];
1294 	rqst.data.create_operational_iq.ci_addr =
1295 	    qg->iq_ci_bus_addr[RAID_PATH];
1296 	rqst.data.create_operational_iq.num_elements =
1297 	    s->s_num_elements_per_iq;
1298 	rqst.data.create_operational_iq.element_length =
1299 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1300 	rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1301 
1302 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1303 		return (B_FALSE);
1304 	qg->iq_pi[RAID_PATH] =
1305 	    (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1306 	    PQI_DEVICE_REGISTERS_OFFSET +
1307 	    rsp.data.create_operational_iq.iq_pi_offset);
1308 
1309 	/* ---- Create inbound queue for Advanced I/O path. ---- */
1310 	(void) memset(&rqst, 0, sizeof (rqst));
1311 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1312 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1313 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1314 	rqst.data.create_operational_iq.queue_id =
1315 	    qg->iq_id[AIO_PATH];
1316 	rqst.data.create_operational_iq.element_array_addr =
1317 	    qg->iq_element_array_bus_addr[AIO_PATH];
1318 	rqst.data.create_operational_iq.ci_addr =
1319 	    qg->iq_ci_bus_addr[AIO_PATH];
1320 	rqst.data.create_operational_iq.num_elements =
1321 	    s->s_num_elements_per_iq;
1322 	rqst.data.create_operational_iq.element_length =
1323 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1324 	rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1325 
1326 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1327 		return (B_FALSE);
1328 
1329 	qg->iq_pi[AIO_PATH] =
1330 	    (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1331 	    PQI_DEVICE_REGISTERS_OFFSET +
1332 	    rsp.data.create_operational_iq.iq_pi_offset);
1333 
1334 	/* ---- Change second queue to be AIO ---- */
1335 	(void) memset(&rqst, 0, sizeof (rqst));
1336 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1337 	rqst.header.iu_length =	PQI_GENERAL_ADMIN_IU_LENGTH;
1338 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
1339 	rqst.data.change_operational_iq_properties.queue_id =
1340 	    qg->iq_id[AIO_PATH];
1341 	rqst.data.change_operational_iq_properties.vendor_specific =
1342 	    PQI_IQ_PROPERTY_IS_AIO_QUEUE;
1343 
1344 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1345 		return (B_FALSE);
1346 
1347 	/* ---- Create outbound queue (device to host) ---- */
1348 	(void) memset(&rqst, 0, sizeof (rqst));
1349 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1350 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1351 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1352 	rqst.data.create_operational_oq.queue_id = qg->oq_id;
1353 	rqst.data.create_operational_oq.element_array_addr =
1354 	    qg->oq_element_array_bus_addr;
1355 	rqst.data.create_operational_oq.pi_addr = qg->oq_pi_bus_addr;
1356 	rqst.data.create_operational_oq.num_elements =
1357 	    s->s_num_elements_per_oq;
1358 	rqst.data.create_operational_oq.element_length =
1359 	    PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16;
1360 	rqst.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1361 	rqst.data.create_operational_oq.int_msg_num = qg->int_msg_num;
1362 
1363 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1364 		return (B_FALSE);
1365 	qg->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1366 	    PQI_DEVICE_REGISTERS_OFFSET +
1367 	    rsp.data.create_operational_oq.oq_ci_offset);
1368 
1369 	return (B_TRUE);
1370 }
1371 
1372 static void
1373 raid_sync_complete(pqi_io_request_t *io __unused, void *ctx)
1374 {
1375 	ksema_t *s = (ksema_t *)ctx;
1376 
1377 	sema_v(s);
1378 }
1379 
1380 static boolean_t
1381 submit_raid_sync_with_io(pqi_state_t *s, pqi_io_request_t *io)
1382 {
1383 	ksema_t	sema;
1384 
1385 	sema_init(&sema, 0, NULL, SEMA_DRIVER, NULL);
1386 
1387 	io->io_cb = raid_sync_complete;
1388 	io->io_context = &sema;
1389 
1390 	pqi_start_io(s, &s->s_queue_groups[PQI_DEFAULT_QUEUE_GROUP],
1391 	    RAID_PATH, io);
1392 	sema_p(&sema);
1393 
1394 	switch (io->io_status) {
1395 		case PQI_DATA_IN_OUT_GOOD:
1396 		case PQI_DATA_IN_OUT_UNDERFLOW:
1397 			return (B_TRUE);
1398 		default:
1399 			return (B_FALSE);
1400 	}
1401 }
1402 
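/*
 * Synchronously issue a RAID path IU: wrap it in a temporary pqi_cmd and
 * pqi_io_request pair, submit it on the default queue group's RAID path,
 * and block on a semaphore until raid_sync_complete() signals completion.
 */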
1403 static boolean_t
1404 submit_raid_rqst_sync(pqi_state_t *s, pqi_iu_header_t *rqst,
1405     pqi_raid_error_info_t e_info __unused)
1406 {
1407 	pqi_io_request_t	*io;
1408 	size_t			len;
1409 	boolean_t		rval = B_FALSE; /* default to error case */
1410 	struct pqi_cmd		*c;
1411 
1412 	if ((io = pqi_alloc_io(s)) == NULL)
1413 		return (B_FALSE);
1414 
1415 	c = kmem_zalloc(sizeof (*c), KM_SLEEP);
1416 
1417 	mutex_init(&c->pc_mutex, NULL, MUTEX_DRIVER, NULL);
1418 	c->pc_io_rqst = io;
1419 	c->pc_device = &s->s_special_device;
1420 	c->pc_softc = s;
1421 	io->io_cmd = c;
1422 	(void) pqi_cmd_action(c, PQI_CMD_QUEUE);
1423 
1424 	((pqi_raid_path_request_t *)rqst)->rp_id = PQI_MAKE_REQID(io->io_index,
1425 	    io->io_gen);
1426 	if (rqst->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
1427 		((pqi_raid_path_request_t *)rqst)->rp_error_index =
1428 		    io->io_index;
1429 	len = rqst->iu_length + PQI_REQUEST_HEADER_LENGTH;
1430 	(void) memcpy(io->io_iu, rqst, len);
1431 
1432 	if (submit_raid_sync_with_io(s, io) == B_TRUE)
1433 		rval = B_TRUE;
1434 
1435 	(void) pqi_cmd_action(c, PQI_CMD_CMPLT);
1436 	mutex_destroy(&c->pc_mutex);
1437 	kmem_free(c, sizeof (*c));
1438 
1439 	return (rval);
1440 }
1441 
1442 static boolean_t
1443 build_raid_path_request(pqi_raid_path_request_t *rqst,
1444     int cmd, caddr_t lun, uint32_t len, int vpd_page)
1445 {
1446 	uint8_t		*cdb;
1447 
1448 	(void) memset(rqst, 0, sizeof (*rqst));
1449 	rqst->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
1450 	rqst->header.iu_length = offsetof(struct pqi_raid_path_request,
1451 	    rp_sglist[1]) - PQI_REQUEST_HEADER_LENGTH;
1452 	rqst->rp_data_len = len;
1453 	(void) memcpy(rqst->rp_lun, lun, sizeof (rqst->rp_lun));
1454 	rqst->rp_task_attr = SOP_TASK_ATTRIBUTE_SIMPLE;
1455 	rqst->rp_additional_cdb = SOP_ADDITIONAL_CDB_BYTES_0;
1456 
1457 	cdb = rqst->rp_cdb;
1458 	switch (cmd) {
1459 	case SCMD_READ_CAPACITY:
1460 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1461 		cdb[0] = (uint8_t)cmd;
1462 		break;
1463 
1464 	case SCMD_READ:
1465 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1466 		cdb[0] = (uint8_t)cmd;
1467 		cdb[2] = (uint8_t)(vpd_page >> 8);
1468 		cdb[3] = (uint8_t)vpd_page;
1469 		cdb[4] = len >> 9;
1470 		break;
1471 
1472 	case SCMD_MODE_SENSE:
1473 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1474 		cdb[0] = (uint8_t)cmd;
1475 		cdb[1] = 0;
1476 		cdb[2] = (uint8_t)vpd_page;
1477 		cdb[4] = (uint8_t)len;
1478 		break;
1479 
1480 	case SCMD_INQUIRY:
1481 		rqst->rp_data_dir = SOP_READ_FLAG;
1482 		cdb[0] = (uint8_t)cmd;
1483 		if (vpd_page & VPD_PAGE) {
1484 			cdb[1] = 0x1;
1485 			cdb[2] = (uint8_t)vpd_page;
1486 		}
1487 		cdb[4] = (uint8_t)len;
1488 		break;
1489 
1490 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
1491 	case BMIC_IDENTIFY_CONTROLLER:
1492 		rqst->rp_data_dir = SOP_READ_FLAG;
1493 		cdb[0] = BMIC_READ;
1494 		cdb[6] = (uint8_t)cmd;
1495 		cdb[7] = (uint8_t)(len >> 8);
1496 		cdb[8] = (uint8_t)len;
1497 		break;
1498 
1499 	case BMIC_WRITE_HOST_WELLNESS:
1500 		rqst->rp_data_dir = SOP_WRITE_FLAG;
1501 		cdb[0] = BMIC_WRITE;
1502 		cdb[6] = (uint8_t)cmd;
1503 		cdb[7] = (uint8_t)(len >> 8);
1504 		cdb[8] = (uint8_t)len;
1505 		break;
1506 
1507 	case CISS_REPORT_LOG:
1508 	case CISS_REPORT_PHYS:
1509 		rqst->rp_data_dir = SOP_READ_FLAG;
1510 		cdb[0] = (uint8_t)cmd;
1511 		if (cmd == CISS_REPORT_PHYS)
1512 			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
1513 		else
1514 			cdb[1] = CISS_REPORT_LOG_EXTENDED;
1515 		cdb[6] = (uint8_t)(len >> 24);
1516 		cdb[7] = (uint8_t)(len >> 16);
1517 		cdb[8] = (uint8_t)(len >> 8);
1518 		cdb[9] = (uint8_t)len;
1519 		break;
1520 
1521 	default:
1522 		ASSERT(0);
1523 		break;
1524 	}
1525 
1526 	return (B_TRUE);
1527 }
1528 
1529 static boolean_t
1530 identify_physical_device(pqi_state_t *s, pqi_device_t *devp,
1531     bmic_identify_physical_device_t *buf)
1532 {
1533 	pqi_dma_overhead_t	*dma;
1534 	pqi_raid_path_request_t	rqst;
1535 	boolean_t		rval = B_FALSE;
1536 	uint16_t		idx;
1537 
1538 	if ((dma = pqi_alloc_single(s, sizeof (*buf))) == NULL)
1539 		return (B_FALSE);
1540 
1541 	if (build_raid_path_request(&rqst, BMIC_IDENTIFY_PHYSICAL_DEVICE,
1542 	    RAID_CTLR_LUNID, sizeof (*buf), 0) == B_FALSE)
1543 		goto out;
1544 
1545 	idx = CISS_GET_DRIVE_NUMBER(devp->pd_scsi3addr);
1546 	rqst.rp_cdb[2] = (uint8_t)idx;
1547 	rqst.rp_cdb[9] = (uint8_t)(idx >> 8);
1548 
1549 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1550 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1551 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1552 
1553 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1554 		goto out;
1555 
1556 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1557 	(void) memcpy(buf, dma->alloc_memory, sizeof (*buf));
1558 	rval = B_TRUE;
1559 out:
1560 	pqi_free_single(s, dma);
1561 	return (rval);
1562 }
1563 
1564 static boolean_t
1565 identify_controller(pqi_state_t *s, bmic_identify_controller_t *ident)
1566 {
1567 	pqi_raid_path_request_t	rqst;
1568 	pqi_dma_overhead_t	*dma;
1569 	boolean_t		rval = B_FALSE;
1570 
1571 	if ((dma = pqi_alloc_single(s, sizeof (*ident))) == NULL)
1572 		return (B_FALSE);
1573 
1574 	if (build_raid_path_request(&rqst, BMIC_IDENTIFY_CONTROLLER,
1575 	    RAID_CTLR_LUNID, sizeof (*ident), 0) == B_FALSE)
1576 		goto out;
1577 
1578 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1579 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1580 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1581 
1582 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1583 		goto out;
1584 
1585 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1586 	(void) memcpy(ident, dma->alloc_memory, sizeof (*ident));
1587 	rval = B_TRUE;
1588 out:
1589 	pqi_free_single(s, dma);
1590 	return (rval);
1591 }
1592 
1593 static boolean_t
1594 write_host_wellness(pqi_state_t *s, void *buf, size_t len)
1595 {
1596 	pqi_dma_overhead_t	*dma;
1597 	boolean_t		rval = B_FALSE;
1598 	pqi_raid_path_request_t	rqst;
1599 
1600 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1601 		return (B_FALSE);
1602 	if (build_raid_path_request(&rqst, BMIC_WRITE_HOST_WELLNESS,
1603 	    RAID_CTLR_LUNID, len, 0) == B_FALSE)
1604 		goto out;
1605 
1606 	(void) memcpy(dma->alloc_memory, buf, dma->len_to_alloc);
1607 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1608 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1609 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1610 
1611 	rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
1612 out:
1613 	pqi_free_single(s, dma);
1614 	return (rval);
1615 }
1616 
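/*
 * report_luns -- issue a single CISS report LUNs command (cmd) and copy up
 * to len bytes of the response into data.
 */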
1617 static boolean_t
1618 report_luns(pqi_state_t *s, int cmd, void *data, size_t len)
1619 {
1620 	pqi_dma_overhead_t	*dma;
1621 	boolean_t		rval = B_FALSE;
1622 	pqi_raid_path_request_t	rqst;
1623 
1624 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1625 		return (B_FALSE);
1626 	if (build_raid_path_request(&rqst, cmd, RAID_CTLR_LUNID,
1627 	    len, 0) == B_FALSE)
1628 		goto error_out;
1629 
1630 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1631 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1632 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1633 
1634 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1635 		goto error_out;
1636 
1637 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1638 	(void) memcpy(data, dma->alloc_memory, len);
1639 	rval = B_TRUE;
1640 
1641 error_out:
1642 	pqi_free_single(s, dma);
1643 	return (rval);
1644 }
1645 
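/*
 * report_luns_by_cmd -- repeatedly issue the report LUNs command, growing
 * the buffer until it is large enough to hold the entire list. On success
 * the allocated buffer and its size are returned through buf and buflen;
 * the caller is responsible for freeing the buffer with kmem_free().
 */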
1646 static boolean_t
1647 report_luns_by_cmd(pqi_state_t *s, int cmd, void **buf, size_t *buflen)
1648 {
1649 	void		*data		= NULL;
1650 	size_t		data_len	= 0;
1651 	size_t		new_data_len;
1652 	uint32_t	new_list_len	= 0;
1653 	uint32_t	list_len	= 0;
1654 	boolean_t	rval		= B_FALSE;
1655 
1656 	new_data_len = sizeof (report_lun_header_t);
1657 	do {
1658 		if (data != NULL) {
1659 			kmem_free(data, data_len);
1660 		}
1661 		data_len = new_data_len;
1662 		data = kmem_zalloc(data_len, KM_SLEEP);
1663 		list_len = new_list_len;
1664 		if (report_luns(s, cmd, data, data_len) == B_FALSE)
1665 			goto error_out;
1666 		new_list_len =
1667 		    ntohl(((report_lun_header_t *)data)->list_length);
1668 		new_data_len = sizeof (report_lun_header_t) +
1669 		    new_list_len;
1670 	} while (new_list_len > list_len);
1671 	rval = B_TRUE;
1672 
1673 error_out:
1674 	if (rval == B_FALSE) {
1675 		kmem_free(data, data_len);
1676 		data = NULL;
1677 		data_len = 0;
1678 	}
1679 	*buf = data;
1680 	*buflen = data_len;
1681 	return (rval);
1682 }
1683 
1684 static inline boolean_t
1685 report_phys_luns(pqi_state_t *s, void **v, size_t *vlen)
1686 {
1687 	return (report_luns_by_cmd(s, CISS_REPORT_PHYS, v, vlen));
1688 }
1689 
1690 static inline boolean_t
1691 report_logical_luns(pqi_state_t *s, void **v, size_t *vlen)
1692 {
1693 	return (report_luns_by_cmd(s, CISS_REPORT_LOG, v, vlen));
1694 }
1695 
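/*
 * get_device_list -- fetch both the physical and logical LUN lists and
 * append an empty entry representing the controller itself to the logical
 * list.
 */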
1696 static boolean_t
1697 get_device_list(pqi_state_t *s, report_phys_lun_extended_t **pl, size_t *plen,
1698     report_log_lun_extended_t **ll, size_t *llen)
1699 {
1700 	report_log_lun_extended_t	*log_data;
1701 	report_log_lun_extended_t	*internal_log;
1702 	size_t				list_len;
1703 	size_t				data_len;
1704 	report_lun_header_t		header;
1705 
1706 	if (report_phys_luns(s, (void **)pl, plen) == B_FALSE)
1707 		return (B_FALSE);
1708 
1709 	if (report_logical_luns(s, (void **)ll, llen) == B_FALSE)
1710 		return (B_FALSE);
1711 
1712 	log_data = *ll;
1713 	if (log_data != NULL) {
1714 		list_len = ntohl(log_data->header.list_length);
1715 	} else {
1716 		(void) memset(&header, 0, sizeof (header));
1717 		log_data = (report_log_lun_extended_t *)&header;
1718 		list_len = 0;
1719 	}
1720 
1721 	data_len = sizeof (header) + list_len;
1722 	/*
1723 	 * Add the controller to the logical LUN list as an empty device entry.
1724 	 */
1725 	internal_log = kmem_zalloc(data_len +
1726 	    sizeof (report_log_lun_extended_entry_t), KM_SLEEP);
1727 	(void) memcpy(internal_log, log_data, data_len);
1728 	internal_log->header.list_length = htonl(list_len +
1729 	    sizeof (report_log_lun_extended_entry_t));
1730 
1731 	if (*ll != NULL)
1732 		kmem_free(*ll, *llen);
1733 	*ll = internal_log;
1734 	*llen = data_len + sizeof (report_log_lun_extended_entry_t);
1735 	return (B_TRUE);
1736 }
1737 
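/*
 * get_device_info -- send a standard INQUIRY to dev and record the device
 * type, vendor, and model strings.
 */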
1738 static boolean_t
1739 get_device_info(pqi_state_t *s, pqi_device_t *dev)
1740 {
1741 	boolean_t		rval = B_FALSE;
1742 	struct scsi_inquiry	*inq;
1743 
1744 	inq = kmem_zalloc(sizeof (*inq), KM_SLEEP);
1745 	if (pqi_scsi_inquiry(s, dev, 0, inq, sizeof (*inq)) == B_FALSE)
1746 		goto out;
1747 
1748 	dev->pd_devtype = inq->inq_dtype & 0x1f;
1749 	(void) memcpy(dev->pd_vendor, inq->inq_vid, sizeof (dev->pd_vendor));
1750 	(void) memcpy(dev->pd_model, inq->inq_pid, sizeof (dev->pd_model));
1751 
1752 	rval = B_TRUE;
1753 out:
1754 	kmem_free(inq, sizeof (*inq));
1755 	return (rval);
1756 }
1757 
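/*
 * is_supported_dev -- return B_TRUE for device types this driver supports;
 * array controllers are only accepted when they are the RAID controller
 * LUN itself.
 */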
1758 static boolean_t
1759 is_supported_dev(pqi_state_t *s, pqi_device_t *dev)
1760 {
1761 	boolean_t	rval = B_FALSE;
1762 
1763 	switch (dev->pd_devtype) {
1764 	case DTYPE_DIRECT:
1765 	case TYPE_ZBC:
1766 	case DTYPE_SEQUENTIAL:
1767 	case DTYPE_ESI:
1768 		rval = B_TRUE;
1769 		break;
1770 	case DTYPE_ARRAY_CTRL:
1771 		if (strncmp(dev->pd_scsi3addr, RAID_CTLR_LUNID,
1772 		    sizeof (dev->pd_scsi3addr)) == 0)
1773 			rval = B_TRUE;
1774 		break;
1775 	default:
1776 		dev_err(s->s_dip, CE_WARN, "%s is not a supported device",
1777 		    scsi_dname(dev->pd_devtype));
1778 		break;
1779 	}
1780 	return (rval);
1781 }
1782 
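/*
 * get_phys_disk_info -- fill in the LUN and unit address for a physical
 * disk from its BMIC identify data.
 */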
1783 static void
1784 get_phys_disk_info(pqi_state_t *s __unused, pqi_device_t *dev,
1785     bmic_identify_physical_device_t *id)
1786 {
1787 	dev->pd_lun = id->scsi_lun;
1788 	(void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1789 	    "w%016lx,%d", dev->pd_wwid, id->scsi_lun);
1790 }
1791 
1792 static int
1793 is_external_raid_addr(char *addr)
1794 {
1795 	return (addr[2] != 0);
1796 }
1797 
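/*
 * build_guid -- regenerate the devid-based GUID for a device from its
 * standard INQUIRY data and VPD page 0x83.
 */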
1798 static void
1799 build_guid(pqi_state_t *s, pqi_device_t *d)
1800 {
1801 	int			len	= 0xff;
1802 	struct scsi_inquiry	*inq	= NULL;
1803 	uchar_t			*inq83	= NULL;
1804 	ddi_devid_t		devid;
1805 
1806 	ddi_devid_free_guid(d->pd_guid);
1807 	d->pd_guid = NULL;
1808 
1809 	inq = kmem_alloc(sizeof (struct scsi_inquiry), KM_SLEEP);
1810 	if (pqi_scsi_inquiry(s, d, 0, inq, sizeof (struct scsi_inquiry)) ==
1811 	    B_FALSE) {
1812 		goto out;
1813 	}
1814 
1815 	inq83 = kmem_zalloc(len, KM_SLEEP);
1816 	if (pqi_scsi_inquiry(s, d, VPD_PAGE | 0x83,
1817 	    (struct scsi_inquiry *)inq83, len) == B_FALSE) {
1818 		goto out;
1819 	}
1820 
1821 	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, NULL,
1822 	    (uchar_t *)inq, sizeof (struct scsi_inquiry), NULL, 0, inq83,
1823 	    (size_t)len, &devid) == DDI_SUCCESS) {
1824 		d->pd_guid = ddi_devid_to_guid(devid);
1825 		ddi_devid_free(devid);
1826 	}
1827 out:
1828 	if (inq != NULL)
1829 		kmem_free(inq, sizeof (struct scsi_inquiry));
1830 	if (inq83 != NULL)
1831 		kmem_free(inq83, len);
1832 }
1833 
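/*
 * create_phys_dev -- allocate and initialize a pqi_device_t for one entry
 * of the extended physical LUN report. Returns NULL for masked or
 * unsupported devices, or on error.
 */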
1834 static pqi_device_t *
1835 create_phys_dev(pqi_state_t *s, report_phys_lun_extended_entry_t *e)
1836 {
1837 	pqi_device_t			*dev;
1838 	bmic_identify_physical_device_t	*id_phys	= NULL;
1839 
1840 	dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1841 	dev->pd_phys_dev = 1;
1842 	dev->pd_wwid = htonll(e->wwid);
1843 	(void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1844 
1845 	/* Skip masked physical devices */
1846 	if (MASKED_DEVICE(dev->pd_scsi3addr))
1847 		goto out;
1848 
1849 	if (get_device_info(s, dev) == B_FALSE)
1850 		goto out;
1851 
1852 	if (!is_supported_dev(s, dev))
1853 		goto out;
1854 
1855 	switch (dev->pd_devtype) {
1856 	case DTYPE_ESI:
1857 		build_guid(s, dev);
1858 		/* hopefully only LUN 0... which seems to match */
1859 		(void) snprintf(dev->pd_unit_address, 20, "w%016lx,0",
1860 		    dev->pd_wwid);
1861 		break;
1862 
1863 	case DTYPE_DIRECT:
1864 	case TYPE_ZBC:
1865 		build_guid(s, dev);
1866 		id_phys = kmem_zalloc(sizeof (*id_phys), KM_SLEEP);
1867 		if ((e->device_flags &
1868 		    REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1869 		    e->aio_handle) {
1870 
1871 			/*
1872 			 * XXX Until I figure out what's wrong with
1873 			 * using AIO I'll disable this for now.
1874 			 */
1875 			dev->pd_aio_enabled = 0;
1876 			dev->pd_aio_handle = e->aio_handle;
1877 			if (identify_physical_device(s, dev,
1878 			    id_phys) == B_FALSE)
1879 				goto out;
1880 		}
1881 		get_phys_disk_info(s, dev, id_phys);
1882 		kmem_free(id_phys, sizeof (*id_phys));
1883 		break;
1884 	}
1885 
1886 	return (dev);
1887 out:
	/* free id_phys if an error path was taken after it was allocated */
	if (id_phys != NULL)
		kmem_free(id_phys, sizeof (*id_phys));
1888 	kmem_free(dev, sizeof (*dev));
1889 	return (NULL);
1890 }
1891 
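/*
 * create_logical_dev -- allocate and initialize a pqi_device_t for one
 * entry of the extended logical LUN report, assigning the target/lun pair
 * used in the unit address.
 */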
1892 static pqi_device_t *
1893 create_logical_dev(pqi_state_t *s, report_log_lun_extended_entry_t *e)
1894 {
1895 	pqi_device_t	*dev;
1896 	uint16_t	target;
1897 	uint16_t	lun;
1898 
1899 	dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1900 	dev->pd_phys_dev = 0;
1901 	(void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1902 	dev->pd_external_raid = is_external_raid_addr(dev->pd_scsi3addr);
1903 
1904 	if (get_device_info(s, dev) == B_FALSE)
1905 		goto out;
1906 
1907 	if (!is_supported_dev(s, dev))
1908 		goto out;
1909 
1910 	if (memcmp(dev->pd_scsi3addr, RAID_CTLR_LUNID, 8) == 0) {
1911 		target = 0;
1912 		lun = 0;
1913 	} else if (dev->pd_external_raid) {
1914 		target = (LE_IN16(&dev->pd_scsi3addr[2]) & 0x3FFF) + 2;
1915 		lun = dev->pd_scsi3addr[0];
1916 	} else {
1917 		target = 1;
1918 		lun = LE_IN16(dev->pd_scsi3addr);
1919 	}
1920 	dev->pd_target = target;
1921 	dev->pd_lun = lun;
1922 	(void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1923 	    "%d,%d", target, lun);
1924 
1925 	(void) memcpy(dev->pd_volume_id, e->volume_id,
1926 	    sizeof (dev->pd_volume_id));
1927 	return (dev);
1928 
1929 out:
1930 	kmem_free(dev, sizeof (*dev));
1931 	return (NULL);
1932 }
1933 
1934 /*
1935  * is_new_dev -- look to see if new_dev is indeed new.
1936  *
1937  * NOTE: This function has two effects. It determines whether new_dev is
1938  * truly new, and it also marks new_dev as scanned when it is new, or marks
1939  * the matching existing device as scanned when it is not.
1940  */
1941 static boolean_t
1942 is_new_dev(pqi_state_t *s, pqi_device_t *new_dev)
1943 {
1944 	pqi_device_t	*dev;
1945 
1946 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1947 	    dev = list_next(&s->s_devnodes, dev)) {
1948 		if (new_dev->pd_phys_dev != dev->pd_phys_dev) {
1949 			continue;
1950 		}
1951 		if (dev->pd_phys_dev) {
1952 			if (dev->pd_wwid == new_dev->pd_wwid) {
1953 				dev->pd_scanned = 1;
1954 				return (B_FALSE);
1955 			}
1956 		} else {
1957 			if (memcmp(dev->pd_volume_id, new_dev->pd_volume_id,
1958 			    16) == 0) {
1959 				dev->pd_scanned = 1;
1960 				return (B_FALSE);
1961 			}
1962 		}
1963 	}
1964 
1965 	new_dev->pd_scanned = 1;
1966 	return (B_TRUE);
1967 }
1968 
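/*
 * Encodings used with the PQI device_reset register: pqi_hba_reset() places
 * the reset action in the bits above bit 4 and the reset type in the low
 * bits.
 */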
1969 enum pqi_reset_action {
1970 	PQI_RESET_ACTION_RESET = 0x1,
1971 	PQI_RESET_ACTION_COMPLETE = 0x2
1972 };
1973 
1974 enum pqi_reset_type {
1975 	PQI_RESET_TYPE_NO_RESET =	0x0,
1976 	PQI_RESET_TYPE_SOFT_RESET =	0x1,
1977 	PQI_RESET_TYPE_FIRM_RESET =	0x2,
1978 	PQI_RESET_TYPE_HARD_RESET =	0x3
1979 };
1980 
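/*
 * pqi_hba_reset -- request a hard reset through the PQI device_reset
 * register, poll until the controller reports the reset action as complete
 * (or the retry count is exhausted), then wait for the controller to become
 * ready again.
 */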
1981 boolean_t
1982 pqi_hba_reset(pqi_state_t *s)
1983 {
1984 	uint32_t	val;
1985 	int		max_count = 1000;
1986 
1987 	val = (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET;
1988 	S32(s, pqi_registers.device_reset, val);
1989 
1990 	while (1) {
1991 		drv_usecwait(100 * (MICROSEC / MILLISEC));
1992 		val = G32(s, pqi_registers.device_reset);
1993 		if ((val >> 5) == PQI_RESET_ACTION_COMPLETE)
1994 			break;
1995 		if (max_count-- == 0)
1996 			break;
1997 	}
1998 
1999 #ifdef DEBUG
2000 	cmn_err(CE_WARN, "pqi_hba_reset: reset reg=0x%x, count=%d", val,
2001 	    max_count);
2002 #endif
2003 	return (pqi_wait_for_mode_ready(s));
2004 }
2005 
2006 static void
2007 save_ctrl_mode(pqi_state_t *s, int mode)
2008 {
2009 	sis_write_scratch(s, mode);
2010 }
2011 
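/*
 * revert_to_sis -- reset the HBA and switch the controller back into SIS
 * mode.
 */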
2012 static boolean_t
2013 revert_to_sis(pqi_state_t *s)
2014 {
2015 	if (!pqi_hba_reset(s))
2016 		return (B_FALSE);
2017 	if (sis_reenable_mode(s) == B_FALSE)
2018 		return (B_FALSE);
2019 	sis_write_scratch(s, SIS_MODE);
2020 	return (B_TRUE);
2021 }
2022 
2023 
2024 #define	BIN2BCD(x)	((((x) / 10) << 4) + (x) % 10)
2025 
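/*
 * update_time -- send the current time of day to the controller as a host
 * wellness update and reschedule itself to run again after a DAY interval.
 */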
2026 static void
2027 update_time(void *v)
2028 {
2029 	pqi_state_t			*s = v;
2030 	bmic_host_wellness_time_t	*ht;
2031 	struct timeval			curtime;
2032 	todinfo_t			tod;
2033 
2034 	ht = kmem_zalloc(sizeof (*ht), KM_SLEEP);
2035 	ht->start_tag[0] = '<';
2036 	ht->start_tag[1] = 'H';
2037 	ht->start_tag[2] = 'W';
2038 	ht->start_tag[3] = '>';
2039 	ht->time_tag[0] = 'T';
2040 	ht->time_tag[1] = 'D';
2041 	ht->time_length = sizeof (ht->time);
2042 
2043 	uniqtime(&curtime);
2044 	mutex_enter(&tod_lock);
2045 	tod = utc_to_tod(curtime.tv_sec);
2046 	mutex_exit(&tod_lock);
2047 
2048 	ht->time[0] = BIN2BCD(tod.tod_hour);		/* Hour */
2049 	ht->time[1] = BIN2BCD(tod.tod_min);		/* Minute */
2050 	ht->time[2] = BIN2BCD(tod.tod_sec);		/* Second */
2051 	ht->time[3] = 0;
2052 	ht->time[4] = BIN2BCD(tod.tod_month);		/* Month */
2053 	ht->time[5] = BIN2BCD(tod.tod_day);		/* Day */
2054 	ht->time[6] = BIN2BCD(20);			/* Century */
2055 	ht->time[7] = BIN2BCD(tod.tod_year - 70);	/* Year w/in century */
2056 
2057 	ht->dont_write_tag[0] = 'D';
2058 	ht->dont_write_tag[1] = 'W';
2059 	ht->end_tag[0] = 'Z';
2060 	ht->end_tag[1] = 'Z';
2061 
2062 	(void) write_host_wellness(s, ht, sizeof (*ht));
2063 	kmem_free(ht, sizeof (*ht));
2064 	s->s_time_of_day = timeout(update_time, s,
2065 	    DAY * drv_usectohz(MICROSEC));
2066 }
2067