/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
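
/*
 * For example, with IWL_NUM_OF_TBS defined as 20 in iwl-fh.h, this works
 * out to 20 - 3 = 17 TBs available for frags in each TFD.
 */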

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16

struct iwl_host_cmd;

/*
 * This file includes the declarations that are internal to the
 * trans_pcie layer
 */

/**
 * struct iwl_rx_mem_buffer - driver-side bookkeeping for one Rx buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 * @list: list entry for the rx_free/rx_used (or allocator) lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer
 *	descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write pointer value written to the device (a
 *	multiple of 8)
 * @queue_size: size of this queue, in entries
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue contents
 * @napi: NAPI struct for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
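
/*
 * Both helpers rely on TFD_QUEUE_SIZE_MAX being a power of two (256), so
 * the AND with (TFD_QUEUE_SIZE_MAX - 1) implements the wrap-around, e.g.
 * iwl_queue_inc_wrap(255) == 0 and iwl_queue_dec_wrap(0) == 255.
 */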

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless future HW changes that). For the normal TX queues, n_window,
 * which is the size of the software queue data, is also 256; however,
 * for the command queue, n_window is only 32 since we don't need so
 * many commands pending. Since the HW still uses 256 BDs for DMA though,
 * TFD_QUEUE_SIZE_MAX stays 256. As a result, the software buffers
 * (@entries in struct iwl_txq) only have 32 entries, while the HW
 * buffers (@tfds in the same struct) have 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid on the HW queue, as illustrated by the
 * example after the TFD_*_SLOTS defines below.
 */
struct iwl_queue {
	int write_ptr;       /* first empty entry (index), host write ptr */
	int read_ptr;        /* last used entry (index), host read ptr */
	/* used for monitoring and recovering a stuck queue */
	dma_addr_t dma_addr;   /* physical addr for BDs */
	int n_window;	       /* safe queue window */
	u32 id;
	int low_mark;	       /* low watermark, resume queue if free
				* space is more than this */
	int high_mark;         /* high watermark, stop queue if free
				* space is less than this */
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
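
/*
 * Example of the SW/HW window overlay described above: for the command
 * queue (n_window == TFD_CMD_SLOTS == 32), a HW index of 100 falls in
 * window instance N == 3 (HW entries 96..127) and maps to SW entry
 * 100 & (32 - 1) == 4; see get_cmd_index() below.
 */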

/*
 * The FH will write back to the first TB only, so we need to copy some
 * data into the buffer regardless of whether it should be mapped or
 * not. This indicates how big the first TB must be to include the
 * scratch buffer. Since the scratch is 4 bytes at offset 12, it's 16
 * now. If we made it bigger, allocations would be bigger and copies
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE	16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
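
/*
 * A minimal layout sanity check (illustrative sketch, assuming struct
 * iwl_cmd_header is 4 bytes as the comment above implies): hdr (4 bytes)
 * plus buf (8 bytes) place @scratch at offset 12, so the 4-byte scratch
 * word ends exactly at IWL_HCMD_SCRATCHBUF_SIZE.
 */
static inline void iwl_pcie_check_scratchbuf_layout(void)
{
	BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 12);
	BUILD_BUG_ON(sizeof(struct iwl_pcie_txq_scratch_buf) !=
		     IWL_HCMD_SCRATCHBUF_SIZE);
}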

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: indicates whether the queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if MSI-X was enabled successfully
 * @allocated_vector: the number of interrupt vectors allocated by the OS
 * @default_irq_num: default irq for non rx interrupt
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;

	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u32 allocated_vector;
	u32 default_irq_num;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
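
/*
 * The two helpers above are inverses of each other, since the PCIe data
 * is embedded in iwl_trans' trans_specific area. An illustrative round
 * trip:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	iwl_trans_pcie_get_trans(trans_pcie) == trans;
 */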

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

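/*
 * In each TB entry of a TFD, hi_n_len packs bits [35:32] of the DMA
 * address in its low 4 bits and the 12-bit length in bits [15:4] (see
 * struct iwl_tfd_tb in iwl-fh.h), hence the shift by 4 below.
 */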
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, in MSI-X a cause is enabled when its mask
		 * bit is unset, hence the negation on write.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
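
/*
 * Worked example of the inverted-mask convention: if only FH causes 0
 * and 1 should stay enabled, fh_mask is 0x3 and the value written to
 * CSR_MSIX_FH_INT_MASK_AD is ~0x3 == 0xfffffffc, i.e. every other cause
 * has its mask bit set (disabled).
 */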

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
	}
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
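
/*
 * Example: with read_ptr == 250 and write_ptr == 5 the queue has
 * wrapped, so the used region is indexes 250..255 and 0..4; the second
 * branch above returns true exactly for those values.
 */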

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}
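
/*
 * This relies on n_window being a power of two (TFD_CMD_SLOTS or
 * TFD_TX_CMD_SLOTS here), so the AND reduces the ever-increasing HW
 * index to its slot inside the software window.
 */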

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
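
/*
 * Typical (illustrative) use of the helper above, e.g. when requesting
 * MAC access from the transport code:
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 *
 * The WARN_ON_ONCE in __iwl_trans_pcie_set_bits_mask() catches callers
 * that pass value bits outside the mask.
 */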

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

#endif /* __iwl_trans_int_pcie_h__ */