/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

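/*
 * Per-board register access vtable.  Each controller family below (SA5,
 * performant, ioaccel mode 1/2) supplies its own implementations, and
 * callers dispatch through the ctlr_info's embedded copy, e.g.
 * h->access.submit_command(h, c).
 */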
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};

struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
	u64 sas_address;
	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
	unsigned char model[16];        /* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* Count of outstanding reset commands */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices:
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note that multiple logical drives
	 * may share physical drives.  You can have, for instance, 5 physical
	 * drives and 3 logical drives, each of the logical drives using
	 * those same 5 physical disks.  We need these pointers for counting
	 * I/Os out to physical devices in order to honor physical device
	 * queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;	/* 1: from external array, 0: not, <0: unknown */
};

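/*
 * One reply ring in DMA'able memory.  The controller posts completed
 * command tags at "head"; "wraparound" toggles each time the consumer
 * wraps, so entries left over from the previous pass can be recognized
 * (see SA5_performant_completed() below).
 */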
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

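/*
 * Byte-packed: this struct mirrors the controller's BMIC parameter
 * buffer layout, so the compiler must not insert any padding.
 */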
#pragma pack(1)
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];
};
#pragma pack()

struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	u64	sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[] */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;
	int	raid_offload_debug;
	int	discovery_polling;
	struct	ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
};

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the maximum wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
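
/*
 * Worked example with the defaults above: polling every 100 ms gives
 * (120 * 1000) / 100 = 1200 ready iterations and (100 * 1000) / 100 =
 * 1000 not-ready iterations; HPSA_BOARD_READY_POLL_INTERVAL is the
 * same 100 ms expressed in jiffies.
 */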
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING   0x04
#define SA5_PERF_INTR_OFF       0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR         0xA0
#define SA5_OUTDB_STATUS        0x9C

#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			3

/*
 * Send the command to the hardware.  The readl() of the scratchpad
 * register flushes the posted write of the command's bus address.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

/* As above, but without the flushing read. */
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

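/*
 * Consume one entry from a performant-mode reply queue.  Each posted
 * entry carries the producer's current pass in its low bit; an entry
 * is fresh only while that bit matches rq->wraparound, which the
 * consumer toggles every time it wraps past h->max_commands.
 */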
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns the value read from the hardware;
 * returns FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command		= SA5_submit_command,
	.set_intr_mask		= SA5_intr_mask,
	.intr_pending		= SA5_intr_pending,
	.command_completed	= SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command		= SA5_submit_command,
	.set_intr_mask		= SA5_performant_intr_mask,
	.intr_pending		= SA5_ioaccel_mode1_intr_pending,
	.command_completed	= SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command		= SA5_submit_command_ioaccel2,
	.set_intr_mask		= SA5_performant_intr_mask,
	.intr_pending		= SA5_performant_intr_pending,
	.command_completed	= SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command		= SA5_submit_command,
	.set_intr_mask		= SA5_performant_intr_mask,
	.intr_pending		= SA5_performant_intr_pending,
	.command_completed	= SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command		= SA5_submit_command_no_read,
	.set_intr_mask		= SA5_performant_intr_mask,
	.intr_pending		= SA5_performant_intr_pending,
	.command_completed	= SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
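
/*
 * Sketch of intended use (assumed from the driver, hpsa.c): the probe
 * path matches the PCI-derived board_id against a table of struct
 * board_type entries to select the product name and the access_method
 * vtable for that controller family.
 */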

#endif /* HPSA_H */