/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"

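/* BAR1 index register used for temporary (dynamic) mappings of core memory. */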
#define MEMOPS_IDX   BAR1_INDEX_DYNAMIC_MAP

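/* Toggle the swap mode bit (bit 1) in the BAR1 index register so that the
 * 64-bit copy loops below see data in the expected byte order on a
 * big-endian host; on little-endian hosts this is a no-op.
 */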
#ifdef __BIG_ENDIAN_BITFIELD
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx)
#endif

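/* Copy len bytes from hostbuf to the BAR1-mapped window at mapped_addr:
 * byte writes until the target is 8-byte aligned, then 64-bit writes (with
 * the swap mode toggled around the qword loop), then any trailing bytes.
 */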
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
		     u8 *hostbuf, u32 len)
{
	while ((len) && ((unsigned long)mapped_addr) & 7) {
		writeb(*(hostbuf++), mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		writeq(*((u64 *)hostbuf), mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		writeb(*(hostbuf++), mapped_addr++);
}

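/* Copy len bytes from the BAR1-mapped window at mapped_addr into hostbuf,
 * using the same byte/qword/byte pattern as octeon_pci_fastwrite.
 */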
static void
octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
		    u8 *hostbuf, u32 len)
{
	while ((len) && ((unsigned long)mapped_addr) & 7) {
		*(hostbuf++) = readb(mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len >= 8) {
		*((u64 *)hostbuf) = readq(mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		*(hostbuf++) = readb(mapped_addr++);
}

/* Core memory read/write with a temporary BAR1 mapping.
 * op = 1 to read, op = 0 to write.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;
	u64 static_mapping_base;

	static_mapping_base = oct->console_nb_info.dram_region_base;

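	/* Fast path: the target address falls inside the BAR1 window that is
	 * statically mapped for console access, so use it directly without
	 * remapping or taking the memory-access lock.
	 */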
	if (static_mapping_base &&
	    static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
		int bar1_index = oct->console_nb_info.bar1_index;

		mapped_addr = oct->mmio[1].hw_addr
			+ (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
			+ (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));

		if (op)
			octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
		else
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);

		return;
	}

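	/* Slow path: repoint the dynamic BAR1 window (MEMOPS_IDX) at the
	 * target address, one 4MB window at a time. The window is shared, so
	 * hold mem_access_lock and restore the saved index value when done.
	 */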
	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index reg value. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If the operation crosses a 4MB boundary, split the
		 * transfer at the 4MB boundary.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
				   (MEMOPS_IDX << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}

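/* Read len bytes of Octeon core memory at coreaddr into buf. */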
void
octeon_pci_read_core_mem(struct octeon_device *oct,
			 u64 coreaddr,
			 u8 *buf,
			 u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}

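/* Write len bytes from buf to Octeon core memory at coreaddr. */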
166 
167 void
168 octeon_pci_write_core_mem(struct octeon_device *oct,
169 			  u64 coreaddr,
170 			  const u8 *buf,
171 			  u32 len)
172 {
173 	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
174 }
175 
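/* The fixed-width helpers below transfer big-endian values held in core
 * memory and convert them to/from host byte order.
 */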
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
	__be64 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);

	return be64_to_cpu(ret);
}

u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
	__be32 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);

	return be32_to_cpu(ret);
}

void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
			       u32 val)
{
	__be32 t = cpu_to_be32(val);

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}
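
/* Usage sketch (hypothetical caller, not part of this driver): given an
 * initialized struct octeon_device, core memory could be accessed like
 * this; "counter_addr" and "flag_addr" are made-up core addresses used
 * purely for illustration.
 *
 *	u64 stamp = octeon_read_device_mem64(oct, counter_addr);
 *	u32 count = octeon_read_device_mem32(oct, counter_addr + 8);
 *
 *	octeon_write_device_mem32(oct, flag_addr, 1);
 *	octeon_pci_read_core_mem(oct, counter_addr, buf, sizeof(buf));
 */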
201