xref: /linux/drivers/net/wireless/intel/ipw2x00/ipw2200.c (revision 975ef7ff81bb000af6e6c8e63e81f89f3468dcf7)
1 /******************************************************************************
2 
3   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 
5   802.11 status code portion of this file from ethereal-0.10.6:
6     Copyright 2000, Axis Communications AB
7     Ethereal - Network traffic analyzer
8     By Gerald Combs <gerald@ethereal.com>
9     Copyright 1998 Gerald Combs
10 
11   This program is free software; you can redistribute it and/or modify it
12   under the terms of version 2 of the GNU General Public License as
13   published by the Free Software Foundation.
14 
15   This program is distributed in the hope that it will be useful, but WITHOUT
16   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18   more details.
19 
20   You should have received a copy of the GNU General Public License along with
21   this program; if not, write to the Free Software Foundation, Inc., 59
22   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23 
24   The full GNU General Public License is included in this distribution in the
25   file called LICENSE.
26 
27   Contact Information:
28   Intel Linux Wireless <ilw@linux.intel.com>
29   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 
31 ******************************************************************************/
32 
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38 
39 
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45 
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51 
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57 
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63 
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69 
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75 
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION     IPW2200_VERSION
80 
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82 
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92 
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97 
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 	'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110 
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
113 #endif
114 
115 static struct ieee80211_rate ipw2200_rates[] = {
116 	{ .bitrate = 10 },
117 	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 	{ .bitrate = 60 },
121 	{ .bitrate = 90 },
122 	{ .bitrate = 120 },
123 	{ .bitrate = 180 },
124 	{ .bitrate = 240 },
125 	{ .bitrate = 360 },
126 	{ .bitrate = 480 },
127 	{ .bitrate = 540 }
128 };
129 
130 #define ipw2200_a_rates		(ipw2200_rates + 4)
131 #define ipw2200_num_a_rates	8
132 #define ipw2200_bg_rates	(ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates	12
134 
135 /* Ugly macro to convert literal channel numbers into their mhz equivalents
136  * There are certianly some conditions that will break this (like feeding it '30')
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 	(((x) <= 14) ? \
140 	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 	((x) + 1000) * 5)
142 
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149 
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 	 QOS_TX3_CW_MIN_OFDM},
153 	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 	 QOS_TX3_CW_MAX_OFDM},
155 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160 
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 	 QOS_TX3_CW_MIN_CCK},
164 	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 	 QOS_TX3_CW_MAX_CCK},
166 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 	 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171 
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 	 DEF_TX3_CW_MIN_OFDM},
175 	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 	 DEF_TX3_CW_MAX_OFDM},
177 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182 
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 	 DEF_TX3_CW_MIN_CCK},
186 	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 	 DEF_TX3_CW_MAX_CCK},
188 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 	 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193 
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195 
196 static int from_priority_to_tx_queue[] = {
197 	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
200 
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202 
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 				       *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 				     *qos_param);
207 #endif				/* CONFIG_IPW2200_QOS */
208 
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 				struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215 
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 			     int len, int sync);
218 
219 static void ipw_tx_queue_free(struct ipw_priv *);
220 
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 				struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233 
234 static int snprint_line(char *buf, size_t count,
235 			const u8 * data, u32 len, u32 ofs)
236 {
237 	int out, i, j, l;
238 	char c;
239 
240 	out = snprintf(buf, count, "%08X", ofs);
241 
242 	for (l = 0, i = 0; i < 2; i++) {
243 		out += snprintf(buf + out, count - out, " ");
244 		for (j = 0; j < 8 && l < len; j++, l++)
245 			out += snprintf(buf + out, count - out, "%02X ",
246 					data[(i * 8 + j)]);
247 		for (; j < 8; j++)
248 			out += snprintf(buf + out, count - out, "   ");
249 	}
250 
251 	out += snprintf(buf + out, count - out, " ");
252 	for (l = 0, i = 0; i < 2; i++) {
253 		out += snprintf(buf + out, count - out, " ");
254 		for (j = 0; j < 8 && l < len; j++, l++) {
255 			c = data[(i * 8 + j)];
256 			if (!isascii(c) || !isprint(c))
257 				c = '.';
258 
259 			out += snprintf(buf + out, count - out, "%c", c);
260 		}
261 
262 		for (; j < 8; j++)
263 			out += snprintf(buf + out, count - out, " ");
264 	}
265 
266 	return out;
267 }
268 
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 	char line[81];
272 	u32 ofs = 0;
273 	if (!(ipw_debug_level & level))
274 		return;
275 
276 	while (len) {
277 		snprint_line(line, sizeof(line), &data[ofs],
278 			     min(len, 16U), ofs);
279 		printk(KERN_DEBUG "%s\n", line);
280 		ofs += 16;
281 		len -= min(len, 16U);
282 	}
283 }
284 
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 	size_t out = size;
288 	u32 ofs = 0;
289 	int total = 0;
290 
291 	while (size && len) {
292 		out = snprint_line(output, size, &data[ofs],
293 				   min_t(size_t, len, 16U), ofs);
294 
295 		ofs += 16;
296 		output += out;
297 		size -= out;
298 		len -= min_t(size_t, len, 16U);
299 		total += out;
300 	}
301 	return total;
302 }
303 
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307 
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311 
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 		     __LINE__, (u32) (b), (u32) (c));
318 	_ipw_write_reg8(a, b, c);
319 }
320 
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 		     __LINE__, (u32) (b), (u32) (c));
327 	_ipw_write_reg16(a, b, c);
328 }
329 
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 		     __LINE__, (u32) (b), (u32) (c));
336 	_ipw_write_reg32(a, b, c);
337 }
338 
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 		u8 val)
342 {
343 	writeb(val, ipw->hw_base + ofs);
344 }
345 
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 			__LINE__, (u32)(ofs), (u32)(val)); \
350 	_ipw_write8(ipw, ofs, val); \
351 } while (0)
352 
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 		u16 val)
356 {
357 	writew(val, ipw->hw_base + ofs);
358 }
359 
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 			__LINE__, (u32)(ofs), (u32)(val)); \
364 	_ipw_write16(ipw, ofs, val); \
365 } while (0)
366 
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 		u32 val)
370 {
371 	writel(val, ipw->hw_base + ofs);
372 }
373 
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 			__LINE__, (u32)(ofs), (u32)(val)); \
378 	_ipw_write32(ipw, ofs, val); \
379 } while (0)
380 
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 	return readb(ipw->hw_base + ofs);
385 }
386 
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 			(u32)(ofs)); \
391 	_ipw_read8(ipw, ofs); \
392 })
393 
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 	return readw(ipw->hw_base + ofs);
398 }
399 
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 			(u32)(ofs)); \
404 	_ipw_read16(ipw, ofs); \
405 })
406 
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 	return readl(ipw->hw_base + ofs);
411 }
412 
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 			(u32)(ofs)); \
417 	_ipw_read32(ipw, ofs); \
418 })
419 
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 			__LINE__, (u32)(b), (u32)(d)); \
425 	_ipw_read_indirect(a, b, c, d); \
426 })
427 
428 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 				int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 			__LINE__, (u32)(b), (u32)(d)); \
434 	_ipw_write_indirect(a, b, c, d); \
435 } while (0)
436 
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444 
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
449 	u32 dif_len = reg - aligned_addr;
450 
451 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455 
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
460 	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461 
462 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466 
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 	u32 word;
471 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 	return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476 
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 	u32 value;
481 
482 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483 
484 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 	return value;
488 }
489 
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /*    for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 			       int num)
494 {
495 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
496 	u32 dif_len = addr - aligned_addr;
497 	u32 i;
498 
499 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500 
501 	if (num <= 0) {
502 		return;
503 	}
504 
505 	/* Read the first dword (or portion) byte by byte */
506 	if (unlikely(dif_len)) {
507 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 		/* Start reading at aligned_addr + dif_len */
509 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 		aligned_addr += 4;
512 	}
513 
514 	/* Read all of the middle dwords as dwords, with auto-increment */
515 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518 
519 	/* Read the last dword (or portion) byte by byte */
520 	if (unlikely(num)) {
521 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 		for (i = 0; num > 0; i++, num--)
523 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 	}
525 }
526 
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /*    for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 				int num)
531 {
532 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
533 	u32 dif_len = addr - aligned_addr;
534 	u32 i;
535 
536 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537 
538 	if (num <= 0) {
539 		return;
540 	}
541 
542 	/* Write the first dword (or portion) byte by byte */
543 	if (unlikely(dif_len)) {
544 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 		/* Start writing at aligned_addr + dif_len */
546 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 		aligned_addr += 4;
549 	}
550 
551 	/* Write all of the middle dwords as dwords, with auto-increment */
552 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555 
556 	/* Write the last dword (or portion) byte by byte */
557 	if (unlikely(num)) {
558 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 		for (i = 0; num > 0; i++, num--, buf++)
560 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 	}
562 }
563 
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /*    for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 			     int num)
568 {
569 	memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571 
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577 
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583 
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 	if (priv->status & STATUS_INT_ENABLED)
587 		return;
588 	priv->status |= STATUS_INT_ENABLED;
589 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591 
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 	if (!(priv->status & STATUS_INT_ENABLED))
595 		return;
596 	priv->status &= ~STATUS_INT_ENABLED;
597 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599 
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 	unsigned long flags;
603 
604 	spin_lock_irqsave(&priv->irq_lock, flags);
605 	__ipw_enable_interrupts(priv);
606 	spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608 
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 	unsigned long flags;
612 
613 	spin_lock_irqsave(&priv->irq_lock, flags);
614 	__ipw_disable_interrupts(priv);
615 	spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617 
618 static char *ipw_error_desc(u32 val)
619 {
620 	switch (val) {
621 	case IPW_FW_ERROR_OK:
622 		return "ERROR_OK";
623 	case IPW_FW_ERROR_FAIL:
624 		return "ERROR_FAIL";
625 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 		return "MEMORY_UNDERFLOW";
627 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 		return "MEMORY_OVERFLOW";
629 	case IPW_FW_ERROR_BAD_PARAM:
630 		return "BAD_PARAM";
631 	case IPW_FW_ERROR_BAD_CHECKSUM:
632 		return "BAD_CHECKSUM";
633 	case IPW_FW_ERROR_NMI_INTERRUPT:
634 		return "NMI_INTERRUPT";
635 	case IPW_FW_ERROR_BAD_DATABASE:
636 		return "BAD_DATABASE";
637 	case IPW_FW_ERROR_ALLOC_FAIL:
638 		return "ALLOC_FAIL";
639 	case IPW_FW_ERROR_DMA_UNDERRUN:
640 		return "DMA_UNDERRUN";
641 	case IPW_FW_ERROR_DMA_STATUS:
642 		return "DMA_STATUS";
643 	case IPW_FW_ERROR_DINO_ERROR:
644 		return "DINO_ERROR";
645 	case IPW_FW_ERROR_EEPROM_ERROR:
646 		return "EEPROM_ERROR";
647 	case IPW_FW_ERROR_SYSASSERT:
648 		return "SYSASSERT";
649 	case IPW_FW_ERROR_FATAL_ERROR:
650 		return "FATAL_ERROR";
651 	default:
652 		return "UNKNOWN_ERROR";
653 	}
654 }
655 
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 			       struct ipw_fw_error *error)
658 {
659 	u32 i;
660 
661 	if (!error) {
662 		IPW_ERROR("Error allocating and capturing error log.  "
663 			  "Nothing to dump.\n");
664 		return;
665 	}
666 
667 	IPW_ERROR("Start IPW Error Log Dump:\n");
668 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 		  error->status, error->config);
670 
671 	for (i = 0; i < error->elem_len; i++)
672 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
673 			  ipw_error_desc(error->elem[i].desc),
674 			  error->elem[i].time,
675 			  error->elem[i].blink1,
676 			  error->elem[i].blink2,
677 			  error->elem[i].link1,
678 			  error->elem[i].link2, error->elem[i].data);
679 	for (i = 0; i < error->log_len; i++)
680 		IPW_ERROR("%i\t0x%08x\t%i\n",
681 			  error->log[i].time,
682 			  error->log[i].data, error->log[i].event);
683 }
684 
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 	return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689 
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 	u32 addr, field_info, field_len, field_count, total_len;
693 
694 	IPW_DEBUG_ORD("ordinal = %i\n", ord);
695 
696 	if (!priv || !val || !len) {
697 		IPW_DEBUG_ORD("Invalid argument\n");
698 		return -EINVAL;
699 	}
700 
701 	/* verify device ordinal tables have been initialized */
702 	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 		IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 		return -EINVAL;
705 	}
706 
707 	switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 	case IPW_ORD_TABLE_0_MASK:
709 		/*
710 		 * TABLE 0: Direct access to a table of 32 bit values
711 		 *
712 		 * This is a very simple table with the data directly
713 		 * read from the table
714 		 */
715 
716 		/* remove the table id from the ordinal */
717 		ord &= IPW_ORD_TABLE_VALUE_MASK;
718 
719 		/* boundary check */
720 		if (ord > priv->table0_len) {
721 			IPW_DEBUG_ORD("ordinal value (%i) longer then "
722 				      "max (%i)\n", ord, priv->table0_len);
723 			return -EINVAL;
724 		}
725 
726 		/* verify we have enough room to store the value */
727 		if (*len < sizeof(u32)) {
728 			IPW_DEBUG_ORD("ordinal buffer length too small, "
729 				      "need %zd\n", sizeof(u32));
730 			return -EINVAL;
731 		}
732 
733 		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 			      ord, priv->table0_addr + (ord << 2));
735 
736 		*len = sizeof(u32);
737 		ord <<= 2;
738 		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 		break;
740 
741 	case IPW_ORD_TABLE_1_MASK:
742 		/*
743 		 * TABLE 1: Indirect access to a table of 32 bit values
744 		 *
745 		 * This is a fairly large table of u32 values each
746 		 * representing starting addr for the data (which is
747 		 * also a u32)
748 		 */
749 
750 		/* remove the table id from the ordinal */
751 		ord &= IPW_ORD_TABLE_VALUE_MASK;
752 
753 		/* boundary check */
754 		if (ord > priv->table1_len) {
755 			IPW_DEBUG_ORD("ordinal value too long\n");
756 			return -EINVAL;
757 		}
758 
759 		/* verify we have enough room to store the value */
760 		if (*len < sizeof(u32)) {
761 			IPW_DEBUG_ORD("ordinal buffer length too small, "
762 				      "need %zd\n", sizeof(u32));
763 			return -EINVAL;
764 		}
765 
766 		*((u32 *) val) =
767 		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 		*len = sizeof(u32);
769 		break;
770 
771 	case IPW_ORD_TABLE_2_MASK:
772 		/*
773 		 * TABLE 2: Indirect access to a table of variable sized values
774 		 *
775 		 * This table consist of six values, each containing
776 		 *     - dword containing the starting offset of the data
777 		 *     - dword containing the lengh in the first 16bits
778 		 *       and the count in the second 16bits
779 		 */
780 
781 		/* remove the table id from the ordinal */
782 		ord &= IPW_ORD_TABLE_VALUE_MASK;
783 
784 		/* boundary check */
785 		if (ord > priv->table2_len) {
786 			IPW_DEBUG_ORD("ordinal value too long\n");
787 			return -EINVAL;
788 		}
789 
790 		/* get the address of statistic */
791 		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792 
793 		/* get the second DW of statistics ;
794 		 * two 16-bit words - first is length, second is count */
795 		field_info =
796 		    ipw_read_reg32(priv,
797 				   priv->table2_addr + (ord << 3) +
798 				   sizeof(u32));
799 
800 		/* get each entry length */
801 		field_len = *((u16 *) & field_info);
802 
803 		/* get number of entries */
804 		field_count = *(((u16 *) & field_info) + 1);
805 
806 		/* abort if not enough memory */
807 		total_len = field_len * field_count;
808 		if (total_len > *len) {
809 			*len = total_len;
810 			return -EINVAL;
811 		}
812 
813 		*len = total_len;
814 		if (!total_len)
815 			return 0;
816 
817 		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 			      "field_info = 0x%08x\n",
819 			      addr, total_len, field_info);
820 		ipw_read_indirect(priv, addr, val, total_len);
821 		break;
822 
823 	default:
824 		IPW_DEBUG_ORD("Invalid ordinal!\n");
825 		return -EINVAL;
826 
827 	}
828 
829 	return 0;
830 }
831 
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 	priv->table0_len = ipw_read32(priv, priv->table0_addr);
836 
837 	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 		      priv->table0_addr, priv->table0_len);
839 
840 	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842 
843 	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 		      priv->table1_addr, priv->table1_len);
845 
846 	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
849 
850 	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 		      priv->table2_addr, priv->table2_len);
852 
853 }
854 
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 	reg &= ~IPW_START_STANDBY;
858 	if (reg & IPW_GATE_ODMA)
859 		reg &= ~IPW_GATE_ODMA;
860 	if (reg & IPW_GATE_IDMA)
861 		reg &= ~IPW_GATE_IDMA;
862 	if (reg & IPW_GATE_ADMA)
863 		reg &= ~IPW_GATE_ADMA;
864 	return reg;
865 }
866 
867 /*
868  * LED behavior:
869  * - On radio ON, turn on any LEDs that require to be on during start
870  * - On initialization, start unassociated blink
871  * - On association, disable unassociated blink
872  * - On disassociation, start unassociated blink
873  * - On radio OFF, turn off any LEDs started during radio on
874  *
875  */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879 
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 	unsigned long flags;
883 	u32 led;
884 
885 	/* If configured to not use LEDs, or nic_type is 1,
886 	 * then we don't toggle a LINK led */
887 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 		return;
889 
890 	spin_lock_irqsave(&priv->lock, flags);
891 
892 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 	    !(priv->status & STATUS_LED_LINK_ON)) {
894 		IPW_DEBUG_LED("Link LED On\n");
895 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 		led |= priv->led_association_on;
897 
898 		led = ipw_register_toggle(led);
899 
900 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
902 
903 		priv->status |= STATUS_LED_LINK_ON;
904 
905 		/* If we aren't associated, schedule turning the LED off */
906 		if (!(priv->status & STATUS_ASSOCIATED))
907 			schedule_delayed_work(&priv->led_link_off,
908 					      LD_TIME_LINK_ON);
909 	}
910 
911 	spin_unlock_irqrestore(&priv->lock, flags);
912 }
913 
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 	struct ipw_priv *priv =
917 		container_of(work, struct ipw_priv, led_link_on.work);
918 	mutex_lock(&priv->mutex);
919 	ipw_led_link_on(priv);
920 	mutex_unlock(&priv->mutex);
921 }
922 
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 	unsigned long flags;
926 	u32 led;
927 
928 	/* If configured not to use LEDs, or nic type is 1,
929 	 * then we don't goggle the LINK led. */
930 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 		return;
932 
933 	spin_lock_irqsave(&priv->lock, flags);
934 
935 	if (priv->status & STATUS_LED_LINK_ON) {
936 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 		led &= priv->led_association_off;
938 		led = ipw_register_toggle(led);
939 
940 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
942 
943 		IPW_DEBUG_LED("Link LED Off\n");
944 
945 		priv->status &= ~STATUS_LED_LINK_ON;
946 
947 		/* If we aren't associated and the radio is on, schedule
948 		 * turning the LED on (blink while unassociated) */
949 		if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 		    !(priv->status & STATUS_ASSOCIATED))
951 			schedule_delayed_work(&priv->led_link_on,
952 					      LD_TIME_LINK_OFF);
953 
954 	}
955 
956 	spin_unlock_irqrestore(&priv->lock, flags);
957 }
958 
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 	struct ipw_priv *priv =
962 		container_of(work, struct ipw_priv, led_link_off.work);
963 	mutex_lock(&priv->mutex);
964 	ipw_led_link_off(priv);
965 	mutex_unlock(&priv->mutex);
966 }
967 
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 	u32 led;
971 
972 	if (priv->config & CFG_NO_LED)
973 		return;
974 
975 	if (priv->status & STATUS_RF_KILL_MASK)
976 		return;
977 
978 	if (!(priv->status & STATUS_LED_ACT_ON)) {
979 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 		led |= priv->led_activity_on;
981 
982 		led = ipw_register_toggle(led);
983 
984 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
986 
987 		IPW_DEBUG_LED("Activity LED On\n");
988 
989 		priv->status |= STATUS_LED_ACT_ON;
990 
991 		cancel_delayed_work(&priv->led_act_off);
992 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 	} else {
994 		/* Reschedule LED off for full time period */
995 		cancel_delayed_work(&priv->led_act_off);
996 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 	}
998 }
999 
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 	unsigned long flags;
1004 	spin_lock_irqsave(&priv->lock, flags);
1005 	__ipw_led_activity_on(priv);
1006 	spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif  /*  0  */
1009 
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 	unsigned long flags;
1013 	u32 led;
1014 
1015 	if (priv->config & CFG_NO_LED)
1016 		return;
1017 
1018 	spin_lock_irqsave(&priv->lock, flags);
1019 
1020 	if (priv->status & STATUS_LED_ACT_ON) {
1021 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 		led &= priv->led_activity_off;
1023 
1024 		led = ipw_register_toggle(led);
1025 
1026 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028 
1029 		IPW_DEBUG_LED("Activity LED Off\n");
1030 
1031 		priv->status &= ~STATUS_LED_ACT_ON;
1032 	}
1033 
1034 	spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036 
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 	struct ipw_priv *priv =
1040 		container_of(work, struct ipw_priv, led_act_off.work);
1041 	mutex_lock(&priv->mutex);
1042 	ipw_led_activity_off(priv);
1043 	mutex_unlock(&priv->mutex);
1044 }
1045 
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 	unsigned long flags;
1049 	u32 led;
1050 
1051 	/* Only nic type 1 supports mode LEDs */
1052 	if (priv->config & CFG_NO_LED ||
1053 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 		return;
1055 
1056 	spin_lock_irqsave(&priv->lock, flags);
1057 
1058 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 	if (priv->assoc_network->mode == IEEE_A) {
1060 		led |= priv->led_ofdm_on;
1061 		led &= priv->led_association_off;
1062 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 	} else if (priv->assoc_network->mode == IEEE_G) {
1064 		led |= priv->led_ofdm_on;
1065 		led |= priv->led_association_on;
1066 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 	} else {
1068 		led &= priv->led_ofdm_off;
1069 		led |= priv->led_association_on;
1070 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 	}
1072 
1073 	led = ipw_register_toggle(led);
1074 
1075 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077 
1078 	spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080 
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 	unsigned long flags;
1084 	u32 led;
1085 
1086 	/* Only nic type 1 supports mode LEDs */
1087 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 		return;
1089 
1090 	spin_lock_irqsave(&priv->lock, flags);
1091 
1092 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 	led &= priv->led_ofdm_off;
1094 	led &= priv->led_association_off;
1095 
1096 	led = ipw_register_toggle(led);
1097 
1098 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100 
1101 	spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103 
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 	ipw_led_link_on(priv);
1107 }
1108 
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 	ipw_led_activity_off(priv);
1112 	ipw_led_link_off(priv);
1113 }
1114 
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 	/* Set the Link Led on for all nic types */
1118 	ipw_led_link_on(priv);
1119 }
1120 
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 	ipw_led_activity_off(priv);
1124 	ipw_led_link_off(priv);
1125 
1126 	if (priv->status & STATUS_RF_KILL_MASK)
1127 		ipw_led_radio_off(priv);
1128 }
1129 
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133 
1134 	/* Set the default PINs for the link and activity leds */
1135 	priv->led_activity_on = IPW_ACTIVITY_LED;
1136 	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137 
1138 	priv->led_association_on = IPW_ASSOCIATED_LED;
1139 	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140 
1141 	/* Set the default PINs for the OFDM leds */
1142 	priv->led_ofdm_on = IPW_OFDM_LED;
1143 	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144 
1145 	switch (priv->nic_type) {
1146 	case EEPROM_NIC_TYPE_1:
1147 		/* In this NIC type, the LEDs are reversed.... */
1148 		priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 		priv->led_association_on = IPW_ACTIVITY_LED;
1151 		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152 
1153 		if (!(priv->config & CFG_NO_LED))
1154 			ipw_led_band_on(priv);
1155 
1156 		/* And we don't blink link LEDs for this nic, so
1157 		 * just return here */
1158 		return;
1159 
1160 	case EEPROM_NIC_TYPE_3:
1161 	case EEPROM_NIC_TYPE_2:
1162 	case EEPROM_NIC_TYPE_4:
1163 	case EEPROM_NIC_TYPE_0:
1164 		break;
1165 
1166 	default:
1167 		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 			       priv->nic_type);
1169 		priv->nic_type = EEPROM_NIC_TYPE_0;
1170 		break;
1171 	}
1172 
1173 	if (!(priv->config & CFG_NO_LED)) {
1174 		if (priv->status & STATUS_ASSOCIATED)
1175 			ipw_led_link_on(priv);
1176 		else
1177 			ipw_led_link_off(priv);
1178 	}
1179 }
1180 
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 	ipw_led_activity_off(priv);
1184 	ipw_led_link_off(priv);
1185 	ipw_led_band_off(priv);
1186 	cancel_delayed_work(&priv->led_link_on);
1187 	cancel_delayed_work(&priv->led_link_off);
1188 	cancel_delayed_work(&priv->led_act_off);
1189 }
1190 
1191 /*
1192  * The following adds a new attribute to the sysfs representation
1193  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194  * used for controlling the debug level.
1195  *
1196  * See the level definitions in ipw for details.
1197  */
1198 static ssize_t debug_level_show(struct device_driver *d, char *buf)
1199 {
1200 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202 
1203 static ssize_t debug_level_store(struct device_driver *d, const char *buf,
1204 				 size_t count)
1205 {
1206 	char *p = (char *)buf;
1207 	u32 val;
1208 
1209 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 		p++;
1211 		if (p[0] == 'x' || p[0] == 'X')
1212 			p++;
1213 		val = simple_strtoul(p, &p, 16);
1214 	} else
1215 		val = simple_strtoul(p, &p, 10);
1216 	if (p == buf)
1217 		printk(KERN_INFO DRV_NAME
1218 		       ": %s is not in hex or decimal form.\n", buf);
1219 	else
1220 		ipw_debug_level = val;
1221 
1222 	return strnlen(buf, count);
1223 }
1224 static DRIVER_ATTR_RW(debug_level);
1225 
1226 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1227 {
1228 	/* length = 1st dword in log */
1229 	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1230 }
1231 
1232 static void ipw_capture_event_log(struct ipw_priv *priv,
1233 				  u32 log_len, struct ipw_event *log)
1234 {
1235 	u32 base;
1236 
1237 	if (log_len) {
1238 		base = ipw_read32(priv, IPW_EVENT_LOG);
1239 		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1240 				  (u8 *) log, sizeof(*log) * log_len);
1241 	}
1242 }
1243 
1244 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1245 {
1246 	struct ipw_fw_error *error;
1247 	u32 log_len = ipw_get_event_log_len(priv);
1248 	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1249 	u32 elem_len = ipw_read_reg32(priv, base);
1250 
1251 	error = kmalloc(sizeof(*error) +
1252 			sizeof(*error->elem) * elem_len +
1253 			sizeof(*error->log) * log_len, GFP_ATOMIC);
1254 	if (!error) {
1255 		IPW_ERROR("Memory allocation for firmware error log "
1256 			  "failed.\n");
1257 		return NULL;
1258 	}
1259 	error->jiffies = jiffies;
1260 	error->status = priv->status;
1261 	error->config = priv->config;
1262 	error->elem_len = elem_len;
1263 	error->log_len = log_len;
1264 	error->elem = (struct ipw_error_elem *)error->payload;
1265 	error->log = (struct ipw_event *)(error->elem + elem_len);
1266 
1267 	ipw_capture_event_log(priv, log_len, error->log);
1268 
1269 	if (elem_len)
1270 		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1271 				  sizeof(*error->elem) * elem_len);
1272 
1273 	return error;
1274 }
1275 
1276 static ssize_t show_event_log(struct device *d,
1277 			      struct device_attribute *attr, char *buf)
1278 {
1279 	struct ipw_priv *priv = dev_get_drvdata(d);
1280 	u32 log_len = ipw_get_event_log_len(priv);
1281 	u32 log_size;
1282 	struct ipw_event *log;
1283 	u32 len = 0, i;
1284 
1285 	/* not using min() because of its strict type checking */
1286 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1287 			sizeof(*log) * log_len : PAGE_SIZE;
1288 	log = kzalloc(log_size, GFP_KERNEL);
1289 	if (!log) {
1290 		IPW_ERROR("Unable to allocate memory for log\n");
1291 		return 0;
1292 	}
1293 	log_len = log_size / sizeof(*log);
1294 	ipw_capture_event_log(priv, log_len, log);
1295 
1296 	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1297 	for (i = 0; i < log_len; i++)
1298 		len += snprintf(buf + len, PAGE_SIZE - len,
1299 				"\n%08X%08X%08X",
1300 				log[i].time, log[i].event, log[i].data);
1301 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1302 	kfree(log);
1303 	return len;
1304 }
1305 
1306 static DEVICE_ATTR(event_log, 0444, show_event_log, NULL);
1307 
1308 static ssize_t show_error(struct device *d,
1309 			  struct device_attribute *attr, char *buf)
1310 {
1311 	struct ipw_priv *priv = dev_get_drvdata(d);
1312 	u32 len = 0, i;
1313 	if (!priv->error)
1314 		return 0;
1315 	len += snprintf(buf + len, PAGE_SIZE - len,
1316 			"%08lX%08X%08X%08X",
1317 			priv->error->jiffies,
1318 			priv->error->status,
1319 			priv->error->config, priv->error->elem_len);
1320 	for (i = 0; i < priv->error->elem_len; i++)
1321 		len += snprintf(buf + len, PAGE_SIZE - len,
1322 				"\n%08X%08X%08X%08X%08X%08X%08X",
1323 				priv->error->elem[i].time,
1324 				priv->error->elem[i].desc,
1325 				priv->error->elem[i].blink1,
1326 				priv->error->elem[i].blink2,
1327 				priv->error->elem[i].link1,
1328 				priv->error->elem[i].link2,
1329 				priv->error->elem[i].data);
1330 
1331 	len += snprintf(buf + len, PAGE_SIZE - len,
1332 			"\n%08X", priv->error->log_len);
1333 	for (i = 0; i < priv->error->log_len; i++)
1334 		len += snprintf(buf + len, PAGE_SIZE - len,
1335 				"\n%08X%08X%08X",
1336 				priv->error->log[i].time,
1337 				priv->error->log[i].event,
1338 				priv->error->log[i].data);
1339 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1340 	return len;
1341 }
1342 
1343 static ssize_t clear_error(struct device *d,
1344 			   struct device_attribute *attr,
1345 			   const char *buf, size_t count)
1346 {
1347 	struct ipw_priv *priv = dev_get_drvdata(d);
1348 
1349 	kfree(priv->error);
1350 	priv->error = NULL;
1351 	return count;
1352 }
1353 
1354 static DEVICE_ATTR(error, 0644, show_error, clear_error);
1355 
1356 static ssize_t show_cmd_log(struct device *d,
1357 			    struct device_attribute *attr, char *buf)
1358 {
1359 	struct ipw_priv *priv = dev_get_drvdata(d);
1360 	u32 len = 0, i;
1361 	if (!priv->cmdlog)
1362 		return 0;
1363 	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1364 	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1365 	     i = (i + 1) % priv->cmdlog_len) {
1366 		len +=
1367 		    snprintf(buf + len, PAGE_SIZE - len,
1368 			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1369 			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1370 			     priv->cmdlog[i].cmd.len);
1371 		len +=
1372 		    snprintk_buf(buf + len, PAGE_SIZE - len,
1373 				 (u8 *) priv->cmdlog[i].cmd.param,
1374 				 priv->cmdlog[i].cmd.len);
1375 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1376 	}
1377 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 	return len;
1379 }
1380 
1381 static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL);
1382 
1383 #ifdef CONFIG_IPW2200_PROMISCUOUS
1384 static void ipw_prom_free(struct ipw_priv *priv);
1385 static int ipw_prom_alloc(struct ipw_priv *priv);
1386 static ssize_t store_rtap_iface(struct device *d,
1387 			 struct device_attribute *attr,
1388 			 const char *buf, size_t count)
1389 {
1390 	struct ipw_priv *priv = dev_get_drvdata(d);
1391 	int rc = 0;
1392 
1393 	if (count < 1)
1394 		return -EINVAL;
1395 
1396 	switch (buf[0]) {
1397 	case '0':
1398 		if (!rtap_iface)
1399 			return count;
1400 
1401 		if (netif_running(priv->prom_net_dev)) {
1402 			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1403 			return count;
1404 		}
1405 
1406 		ipw_prom_free(priv);
1407 		rtap_iface = 0;
1408 		break;
1409 
1410 	case '1':
1411 		if (rtap_iface)
1412 			return count;
1413 
1414 		rc = ipw_prom_alloc(priv);
1415 		if (!rc)
1416 			rtap_iface = 1;
1417 		break;
1418 
1419 	default:
1420 		return -EINVAL;
1421 	}
1422 
1423 	if (rc) {
1424 		IPW_ERROR("Failed to register promiscuous network "
1425 			  "device (error %d).\n", rc);
1426 	}
1427 
1428 	return count;
1429 }
1430 
1431 static ssize_t show_rtap_iface(struct device *d,
1432 			struct device_attribute *attr,
1433 			char *buf)
1434 {
1435 	struct ipw_priv *priv = dev_get_drvdata(d);
1436 	if (rtap_iface)
1437 		return sprintf(buf, "%s", priv->prom_net_dev->name);
1438 	else {
1439 		buf[0] = '-';
1440 		buf[1] = '1';
1441 		buf[2] = '\0';
1442 		return 3;
1443 	}
1444 }
1445 
1446 static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface);
1447 
1448 static ssize_t store_rtap_filter(struct device *d,
1449 			 struct device_attribute *attr,
1450 			 const char *buf, size_t count)
1451 {
1452 	struct ipw_priv *priv = dev_get_drvdata(d);
1453 
1454 	if (!priv->prom_priv) {
1455 		IPW_ERROR("Attempting to set filter without "
1456 			  "rtap_iface enabled.\n");
1457 		return -EPERM;
1458 	}
1459 
1460 	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1461 
1462 	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1463 		       BIT_ARG16(priv->prom_priv->filter));
1464 
1465 	return count;
1466 }
1467 
1468 static ssize_t show_rtap_filter(struct device *d,
1469 			struct device_attribute *attr,
1470 			char *buf)
1471 {
1472 	struct ipw_priv *priv = dev_get_drvdata(d);
1473 	return sprintf(buf, "0x%04X",
1474 		       priv->prom_priv ? priv->prom_priv->filter : 0);
1475 }
1476 
1477 static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter);
1478 #endif
1479 
1480 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1481 			     char *buf)
1482 {
1483 	struct ipw_priv *priv = dev_get_drvdata(d);
1484 	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1485 }
1486 
1487 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1488 			      const char *buf, size_t count)
1489 {
1490 	struct ipw_priv *priv = dev_get_drvdata(d);
1491 	struct net_device *dev = priv->net_dev;
1492 	char buffer[] = "00000000";
1493 	unsigned long len =
1494 	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1495 	unsigned long val;
1496 	char *p = buffer;
1497 
1498 	IPW_DEBUG_INFO("enter\n");
1499 
1500 	strncpy(buffer, buf, len);
1501 	buffer[len] = 0;
1502 
1503 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1504 		p++;
1505 		if (p[0] == 'x' || p[0] == 'X')
1506 			p++;
1507 		val = simple_strtoul(p, &p, 16);
1508 	} else
1509 		val = simple_strtoul(p, &p, 10);
1510 	if (p == buffer) {
1511 		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1512 	} else {
1513 		priv->ieee->scan_age = val;
1514 		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1515 	}
1516 
1517 	IPW_DEBUG_INFO("exit\n");
1518 	return len;
1519 }
1520 
1521 static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
1522 
1523 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1524 			char *buf)
1525 {
1526 	struct ipw_priv *priv = dev_get_drvdata(d);
1527 	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1528 }
1529 
1530 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1531 			 const char *buf, size_t count)
1532 {
1533 	struct ipw_priv *priv = dev_get_drvdata(d);
1534 
1535 	IPW_DEBUG_INFO("enter\n");
1536 
1537 	if (count == 0)
1538 		return 0;
1539 
1540 	if (*buf == 0) {
1541 		IPW_DEBUG_LED("Disabling LED control.\n");
1542 		priv->config |= CFG_NO_LED;
1543 		ipw_led_shutdown(priv);
1544 	} else {
1545 		IPW_DEBUG_LED("Enabling LED control.\n");
1546 		priv->config &= ~CFG_NO_LED;
1547 		ipw_led_init(priv);
1548 	}
1549 
1550 	IPW_DEBUG_INFO("exit\n");
1551 	return count;
1552 }
1553 
1554 static DEVICE_ATTR(led, 0644, show_led, store_led);
1555 
1556 static ssize_t show_status(struct device *d,
1557 			   struct device_attribute *attr, char *buf)
1558 {
1559 	struct ipw_priv *p = dev_get_drvdata(d);
1560 	return sprintf(buf, "0x%08x\n", (int)p->status);
1561 }
1562 
1563 static DEVICE_ATTR(status, 0444, show_status, NULL);
1564 
1565 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1566 			char *buf)
1567 {
1568 	struct ipw_priv *p = dev_get_drvdata(d);
1569 	return sprintf(buf, "0x%08x\n", (int)p->config);
1570 }
1571 
1572 static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
1573 
1574 static ssize_t show_nic_type(struct device *d,
1575 			     struct device_attribute *attr, char *buf)
1576 {
1577 	struct ipw_priv *priv = dev_get_drvdata(d);
1578 	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1579 }
1580 
1581 static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL);
1582 
1583 static ssize_t show_ucode_version(struct device *d,
1584 				  struct device_attribute *attr, char *buf)
1585 {
1586 	u32 len = sizeof(u32), tmp = 0;
1587 	struct ipw_priv *p = dev_get_drvdata(d);
1588 
1589 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1590 		return 0;
1591 
1592 	return sprintf(buf, "0x%08x\n", tmp);
1593 }
1594 
1595 static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL);
1596 
1597 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1598 			char *buf)
1599 {
1600 	u32 len = sizeof(u32), tmp = 0;
1601 	struct ipw_priv *p = dev_get_drvdata(d);
1602 
1603 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1604 		return 0;
1605 
1606 	return sprintf(buf, "0x%08x\n", tmp);
1607 }
1608 
1609 static DEVICE_ATTR(rtc, 0644, show_rtc, NULL);
1610 
1611 /*
1612  * Add a device attribute to view/control the delay between eeprom
1613  * operations.
1614  */
1615 static ssize_t show_eeprom_delay(struct device *d,
1616 				 struct device_attribute *attr, char *buf)
1617 {
1618 	struct ipw_priv *p = dev_get_drvdata(d);
1619 	int n = p->eeprom_delay;
1620 	return sprintf(buf, "%i\n", n);
1621 }
1622 static ssize_t store_eeprom_delay(struct device *d,
1623 				  struct device_attribute *attr,
1624 				  const char *buf, size_t count)
1625 {
1626 	struct ipw_priv *p = dev_get_drvdata(d);
1627 	sscanf(buf, "%i", &p->eeprom_delay);
1628 	return strnlen(buf, count);
1629 }
1630 
1631 static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);
1632 
1633 static ssize_t show_command_event_reg(struct device *d,
1634 				      struct device_attribute *attr, char *buf)
1635 {
1636 	u32 reg = 0;
1637 	struct ipw_priv *p = dev_get_drvdata(d);
1638 
1639 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1640 	return sprintf(buf, "0x%08x\n", reg);
1641 }
1642 static ssize_t store_command_event_reg(struct device *d,
1643 				       struct device_attribute *attr,
1644 				       const char *buf, size_t count)
1645 {
1646 	u32 reg;
1647 	struct ipw_priv *p = dev_get_drvdata(d);
1648 
1649 	sscanf(buf, "%x", &reg);
1650 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1651 	return strnlen(buf, count);
1652 }
1653 
1654 static DEVICE_ATTR(command_event_reg, 0644,
1655 		   show_command_event_reg, store_command_event_reg);
1656 
1657 static ssize_t show_mem_gpio_reg(struct device *d,
1658 				 struct device_attribute *attr, char *buf)
1659 {
1660 	u32 reg = 0;
1661 	struct ipw_priv *p = dev_get_drvdata(d);
1662 
1663 	reg = ipw_read_reg32(p, 0x301100);
1664 	return sprintf(buf, "0x%08x\n", reg);
1665 }
1666 static ssize_t store_mem_gpio_reg(struct device *d,
1667 				  struct device_attribute *attr,
1668 				  const char *buf, size_t count)
1669 {
1670 	u32 reg;
1671 	struct ipw_priv *p = dev_get_drvdata(d);
1672 
1673 	sscanf(buf, "%x", &reg);
1674 	ipw_write_reg32(p, 0x301100, reg);
1675 	return strnlen(buf, count);
1676 }
1677 
1678 static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);
1679 
1680 static ssize_t show_indirect_dword(struct device *d,
1681 				   struct device_attribute *attr, char *buf)
1682 {
1683 	u32 reg = 0;
1684 	struct ipw_priv *priv = dev_get_drvdata(d);
1685 
1686 	if (priv->status & STATUS_INDIRECT_DWORD)
1687 		reg = ipw_read_reg32(priv, priv->indirect_dword);
1688 	else
1689 		reg = 0;
1690 
1691 	return sprintf(buf, "0x%08x\n", reg);
1692 }
1693 static ssize_t store_indirect_dword(struct device *d,
1694 				    struct device_attribute *attr,
1695 				    const char *buf, size_t count)
1696 {
1697 	struct ipw_priv *priv = dev_get_drvdata(d);
1698 
1699 	sscanf(buf, "%x", &priv->indirect_dword);
1700 	priv->status |= STATUS_INDIRECT_DWORD;
1701 	return strnlen(buf, count);
1702 }
1703 
1704 static DEVICE_ATTR(indirect_dword, 0644,
1705 		   show_indirect_dword, store_indirect_dword);
1706 
1707 static ssize_t show_indirect_byte(struct device *d,
1708 				  struct device_attribute *attr, char *buf)
1709 {
1710 	u8 reg = 0;
1711 	struct ipw_priv *priv = dev_get_drvdata(d);
1712 
1713 	if (priv->status & STATUS_INDIRECT_BYTE)
1714 		reg = ipw_read_reg8(priv, priv->indirect_byte);
1715 	else
1716 		reg = 0;
1717 
1718 	return sprintf(buf, "0x%02x\n", reg);
1719 }
1720 static ssize_t store_indirect_byte(struct device *d,
1721 				   struct device_attribute *attr,
1722 				   const char *buf, size_t count)
1723 {
1724 	struct ipw_priv *priv = dev_get_drvdata(d);
1725 
1726 	sscanf(buf, "%x", &priv->indirect_byte);
1727 	priv->status |= STATUS_INDIRECT_BYTE;
1728 	return strnlen(buf, count);
1729 }
1730 
1731 static DEVICE_ATTR(indirect_byte, 0644,
1732 		   show_indirect_byte, store_indirect_byte);
1733 
1734 static ssize_t show_direct_dword(struct device *d,
1735 				 struct device_attribute *attr, char *buf)
1736 {
1737 	u32 reg = 0;
1738 	struct ipw_priv *priv = dev_get_drvdata(d);
1739 
1740 	if (priv->status & STATUS_DIRECT_DWORD)
1741 		reg = ipw_read32(priv, priv->direct_dword);
1742 	else
1743 		reg = 0;
1744 
1745 	return sprintf(buf, "0x%08x\n", reg);
1746 }
1747 static ssize_t store_direct_dword(struct device *d,
1748 				  struct device_attribute *attr,
1749 				  const char *buf, size_t count)
1750 {
1751 	struct ipw_priv *priv = dev_get_drvdata(d);
1752 
1753 	sscanf(buf, "%x", &priv->direct_dword);
1754 	priv->status |= STATUS_DIRECT_DWORD;
1755 	return strnlen(buf, count);
1756 }
1757 
1758 static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);
1759 
1760 static int rf_kill_active(struct ipw_priv *priv)
1761 {
1762 	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1763 		priv->status |= STATUS_RF_KILL_HW;
1764 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1765 	} else {
1766 		priv->status &= ~STATUS_RF_KILL_HW;
1767 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1768 	}
1769 
1770 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1771 }
1772 
1773 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1774 			    char *buf)
1775 {
1776 	/* 0 - RF kill not enabled
1777 	   1 - SW based RF kill active (sysfs)
1778 	   2 - HW based RF kill active
1779 	   3 - Both HW and SW baed RF kill active */
1780 	struct ipw_priv *priv = dev_get_drvdata(d);
1781 	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1782 	    (rf_kill_active(priv) ? 0x2 : 0x0);
1783 	return sprintf(buf, "%i\n", val);
1784 }
1785 
1786 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1787 {
1788 	if ((disable_radio ? 1 : 0) ==
1789 	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1790 		return 0;
1791 
1792 	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1793 			  disable_radio ? "OFF" : "ON");
1794 
1795 	if (disable_radio) {
1796 		priv->status |= STATUS_RF_KILL_SW;
1797 
1798 		cancel_delayed_work(&priv->request_scan);
1799 		cancel_delayed_work(&priv->request_direct_scan);
1800 		cancel_delayed_work(&priv->request_passive_scan);
1801 		cancel_delayed_work(&priv->scan_event);
1802 		schedule_work(&priv->down);
1803 	} else {
1804 		priv->status &= ~STATUS_RF_KILL_SW;
1805 		if (rf_kill_active(priv)) {
1806 			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1807 					  "disabled by HW switch\n");
1808 			/* Make sure the RF_KILL check timer is running */
1809 			cancel_delayed_work(&priv->rf_kill);
1810 			schedule_delayed_work(&priv->rf_kill,
1811 					      round_jiffies_relative(2 * HZ));
1812 		} else
1813 			schedule_work(&priv->up);
1814 	}
1815 
1816 	return 1;
1817 }
1818 
1819 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1820 			     const char *buf, size_t count)
1821 {
1822 	struct ipw_priv *priv = dev_get_drvdata(d);
1823 
1824 	ipw_radio_kill_sw(priv, buf[0] == '1');
1825 
1826 	return count;
1827 }
1828 
1829 static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
1830 
1831 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1832 			       char *buf)
1833 {
1834 	struct ipw_priv *priv = dev_get_drvdata(d);
1835 	int pos = 0, len = 0;
1836 	if (priv->config & CFG_SPEED_SCAN) {
1837 		while (priv->speed_scan[pos] != 0)
1838 			len += sprintf(&buf[len], "%d ",
1839 				       priv->speed_scan[pos++]);
1840 		return len + sprintf(&buf[len], "\n");
1841 	}
1842 
1843 	return sprintf(buf, "0\n");
1844 }
1845 
1846 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1847 				const char *buf, size_t count)
1848 {
1849 	struct ipw_priv *priv = dev_get_drvdata(d);
1850 	int channel, pos = 0;
1851 	const char *p = buf;
1852 
1853 	/* list of space separated channels to scan, optionally ending with 0 */
1854 	while ((channel = simple_strtol(p, NULL, 0))) {
1855 		if (pos == MAX_SPEED_SCAN - 1) {
1856 			priv->speed_scan[pos] = 0;
1857 			break;
1858 		}
1859 
1860 		if (libipw_is_valid_channel(priv->ieee, channel))
1861 			priv->speed_scan[pos++] = channel;
1862 		else
1863 			IPW_WARNING("Skipping invalid channel request: %d\n",
1864 				    channel);
1865 		p = strchr(p, ' ');
1866 		if (!p)
1867 			break;
1868 		while (*p == ' ' || *p == '\t')
1869 			p++;
1870 	}
1871 
1872 	if (pos == 0)
1873 		priv->config &= ~CFG_SPEED_SCAN;
1874 	else {
1875 		priv->speed_scan_pos = 0;
1876 		priv->config |= CFG_SPEED_SCAN;
1877 	}
1878 
1879 	return count;
1880 }
1881 
1882 static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan);
1883 
1884 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1885 			      char *buf)
1886 {
1887 	struct ipw_priv *priv = dev_get_drvdata(d);
1888 	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1889 }
1890 
1891 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1892 			       const char *buf, size_t count)
1893 {
1894 	struct ipw_priv *priv = dev_get_drvdata(d);
1895 	if (buf[0] == '1')
1896 		priv->config |= CFG_NET_STATS;
1897 	else
1898 		priv->config &= ~CFG_NET_STATS;
1899 
1900 	return count;
1901 }
1902 
1903 static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats);
1904 
1905 static ssize_t show_channels(struct device *d,
1906 			     struct device_attribute *attr,
1907 			     char *buf)
1908 {
1909 	struct ipw_priv *priv = dev_get_drvdata(d);
1910 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1911 	int len = 0, i;
1912 
1913 	len = sprintf(&buf[len],
1914 		      "Displaying %d channels in 2.4Ghz band "
1915 		      "(802.11bg):\n", geo->bg_channels);
1916 
1917 	for (i = 0; i < geo->bg_channels; i++) {
1918 		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1919 			       geo->bg[i].channel,
1920 			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1921 			       " (radar spectrum)" : "",
1922 			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1923 				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1924 			       ? "" : ", IBSS",
1925 			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1926 			       "passive only" : "active/passive",
1927 			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1928 			       "B" : "B/G");
1929 	}
1930 
1931 	len += sprintf(&buf[len],
1932 		       "Displaying %d channels in 5.2Ghz band "
1933 		       "(802.11a):\n", geo->a_channels);
1934 	for (i = 0; i < geo->a_channels; i++) {
1935 		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1936 			       geo->a[i].channel,
1937 			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1938 			       " (radar spectrum)" : "",
1939 			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1940 				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1941 			       ? "" : ", IBSS",
1942 			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1943 			       "passive only" : "active/passive");
1944 	}
1945 
1946 	return len;
1947 }
1948 
1949 static DEVICE_ATTR(channels, 0400, show_channels, NULL);
1950 
1951 static void notify_wx_assoc_event(struct ipw_priv *priv)
1952 {
1953 	union iwreq_data wrqu;
1954 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1955 	if (priv->status & STATUS_ASSOCIATED)
1956 		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1957 	else
1958 		eth_zero_addr(wrqu.ap_addr.sa_data);
1959 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1960 }
1961 
1962 static void ipw_irq_tasklet(struct ipw_priv *priv)
1963 {
1964 	u32 inta, inta_mask, handled = 0;
1965 	unsigned long flags;
1966 	int rc = 0;
1967 
1968 	spin_lock_irqsave(&priv->irq_lock, flags);
1969 
1970 	inta = ipw_read32(priv, IPW_INTA_RW);
1971 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1972 
1973 	if (inta == 0xFFFFFFFF) {
1974 		/* Hardware disappeared */
1975 		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1976 		/* Only handle the cached INTA values */
1977 		inta = 0;
1978 	}
1979 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1980 
1981 	/* Add any cached INTA values that need to be handled */
1982 	inta |= priv->isr_inta;
1983 
1984 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1985 
1986 	spin_lock_irqsave(&priv->lock, flags);
1987 
1988 	/* handle all the justifications for the interrupt */
1989 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1990 		ipw_rx(priv);
1991 		handled |= IPW_INTA_BIT_RX_TRANSFER;
1992 	}
1993 
1994 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1995 		IPW_DEBUG_HC("Command completed.\n");
1996 		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1997 		priv->status &= ~STATUS_HCMD_ACTIVE;
1998 		wake_up_interruptible(&priv->wait_command_queue);
1999 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2000 	}
2001 
2002 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2003 		IPW_DEBUG_TX("TX_QUEUE_1\n");
2004 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2005 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
2006 	}
2007 
2008 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2009 		IPW_DEBUG_TX("TX_QUEUE_2\n");
2010 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2011 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
2012 	}
2013 
2014 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2015 		IPW_DEBUG_TX("TX_QUEUE_3\n");
2016 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2017 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2018 	}
2019 
2020 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2021 		IPW_DEBUG_TX("TX_QUEUE_4\n");
2022 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2023 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2024 	}
2025 
2026 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2027 		IPW_WARNING("STATUS_CHANGE\n");
2028 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2029 	}
2030 
2031 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2032 		IPW_WARNING("TX_PERIOD_EXPIRED\n");
2033 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2034 	}
2035 
2036 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2037 		IPW_WARNING("HOST_CMD_DONE\n");
2038 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2039 	}
2040 
2041 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2042 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2043 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2044 	}
2045 
2046 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2047 		IPW_WARNING("PHY_OFF_DONE\n");
2048 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2049 	}
2050 
2051 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2052 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2053 		priv->status |= STATUS_RF_KILL_HW;
2054 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2055 		wake_up_interruptible(&priv->wait_command_queue);
2056 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2057 		cancel_delayed_work(&priv->request_scan);
2058 		cancel_delayed_work(&priv->request_direct_scan);
2059 		cancel_delayed_work(&priv->request_passive_scan);
2060 		cancel_delayed_work(&priv->scan_event);
2061 		schedule_work(&priv->link_down);
2062 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2063 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2064 	}
2065 
2066 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2067 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2068 		if (priv->error) {
2069 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2070 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2071 				struct ipw_fw_error *error =
2072 				    ipw_alloc_error_log(priv);
2073 				ipw_dump_error_log(priv, error);
2074 				kfree(error);
2075 			}
2076 		} else {
2077 			priv->error = ipw_alloc_error_log(priv);
2078 			if (priv->error)
2079 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2080 			else
2081 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2082 					     "log.\n");
2083 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2084 				ipw_dump_error_log(priv, priv->error);
2085 		}
2086 
2087 		/* XXX: If hardware encryption is for WPA/WPA2,
2088 		 * we have to notify the supplicant. */
2089 		if (priv->ieee->sec.encrypt) {
2090 			priv->status &= ~STATUS_ASSOCIATED;
2091 			notify_wx_assoc_event(priv);
2092 		}
2093 
2094 		/* Keep the restart process from trying to send host
2095 		 * commands by clearing the INIT status bit */
2096 		priv->status &= ~STATUS_INIT;
2097 
2098 		/* Cancel currently queued command. */
2099 		priv->status &= ~STATUS_HCMD_ACTIVE;
2100 		wake_up_interruptible(&priv->wait_command_queue);
2101 
2102 		schedule_work(&priv->adapter_restart);
2103 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2104 	}
2105 
2106 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2107 		IPW_ERROR("Parity error\n");
2108 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2109 	}
2110 
2111 	if (handled != inta) {
2112 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2113 	}
2114 
2115 	spin_unlock_irqrestore(&priv->lock, flags);
2116 
2117 	/* enable all interrupts */
2118 	ipw_enable_interrupts(priv);
2119 }
2120 
2121 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2122 static char *get_cmd_string(u8 cmd)
2123 {
2124 	switch (cmd) {
2125 		IPW_CMD(HOST_COMPLETE);
2126 		IPW_CMD(POWER_DOWN);
2127 		IPW_CMD(SYSTEM_CONFIG);
2128 		IPW_CMD(MULTICAST_ADDRESS);
2129 		IPW_CMD(SSID);
2130 		IPW_CMD(ADAPTER_ADDRESS);
2131 		IPW_CMD(PORT_TYPE);
2132 		IPW_CMD(RTS_THRESHOLD);
2133 		IPW_CMD(FRAG_THRESHOLD);
2134 		IPW_CMD(POWER_MODE);
2135 		IPW_CMD(WEP_KEY);
2136 		IPW_CMD(TGI_TX_KEY);
2137 		IPW_CMD(SCAN_REQUEST);
2138 		IPW_CMD(SCAN_REQUEST_EXT);
2139 		IPW_CMD(ASSOCIATE);
2140 		IPW_CMD(SUPPORTED_RATES);
2141 		IPW_CMD(SCAN_ABORT);
2142 		IPW_CMD(TX_FLUSH);
2143 		IPW_CMD(QOS_PARAMETERS);
2144 		IPW_CMD(DINO_CONFIG);
2145 		IPW_CMD(RSN_CAPABILITIES);
2146 		IPW_CMD(RX_KEY);
2147 		IPW_CMD(CARD_DISABLE);
2148 		IPW_CMD(SEED_NUMBER);
2149 		IPW_CMD(TX_POWER);
2150 		IPW_CMD(COUNTRY_INFO);
2151 		IPW_CMD(AIRONET_INFO);
2152 		IPW_CMD(AP_TX_POWER);
2153 		IPW_CMD(CCKM_INFO);
2154 		IPW_CMD(CCX_VER_INFO);
2155 		IPW_CMD(SET_CALIBRATION);
2156 		IPW_CMD(SENSITIVITY_CALIB);
2157 		IPW_CMD(RETRY_LIMIT);
2158 		IPW_CMD(IPW_PRE_POWER_DOWN);
2159 		IPW_CMD(VAP_BEACON_TEMPLATE);
2160 		IPW_CMD(VAP_DTIM_PERIOD);
2161 		IPW_CMD(EXT_SUPPORTED_RATES);
2162 		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2163 		IPW_CMD(VAP_QUIET_INTERVALS);
2164 		IPW_CMD(VAP_CHANNEL_SWITCH);
2165 		IPW_CMD(VAP_MANDATORY_CHANNELS);
2166 		IPW_CMD(VAP_CELL_PWR_LIMIT);
2167 		IPW_CMD(VAP_CF_PARAM_SET);
2168 		IPW_CMD(VAP_SET_BEACONING_STATE);
2169 		IPW_CMD(MEASUREMENT);
2170 		IPW_CMD(POWER_CAPABILITY);
2171 		IPW_CMD(SUPPORTED_CHANNELS);
2172 		IPW_CMD(TPC_REPORT);
2173 		IPW_CMD(WME_INFO);
2174 		IPW_CMD(PRODUCTION_COMMAND);
2175 	default:
2176 		return "UNKNOWN";
2177 	}
2178 }
2179 
2180 #define HOST_COMPLETE_TIMEOUT HZ
2181 
2182 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2183 {
2184 	int rc = 0;
2185 	unsigned long flags;
2186 	unsigned long now, end;
2187 
2188 	spin_lock_irqsave(&priv->lock, flags);
2189 	if (priv->status & STATUS_HCMD_ACTIVE) {
2190 		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2191 			  get_cmd_string(cmd->cmd));
2192 		spin_unlock_irqrestore(&priv->lock, flags);
2193 		return -EAGAIN;
2194 	}
2195 
2196 	priv->status |= STATUS_HCMD_ACTIVE;
2197 
2198 	if (priv->cmdlog) {
2199 		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2200 		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2201 		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2202 		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2203 		       cmd->len);
2204 		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2205 	}
2206 
2207 	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2208 		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2209 		     priv->status);
2210 
2211 #ifndef DEBUG_CMD_WEP_KEY
2212 	if (cmd->cmd == IPW_CMD_WEP_KEY)
2213 		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2214 	else
2215 #endif
2216 		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2217 
2218 	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2219 	if (rc) {
2220 		priv->status &= ~STATUS_HCMD_ACTIVE;
2221 		IPW_ERROR("Failed to send %s: Reason %d\n",
2222 			  get_cmd_string(cmd->cmd), rc);
2223 		spin_unlock_irqrestore(&priv->lock, flags);
2224 		goto exit;
2225 	}
2226 	spin_unlock_irqrestore(&priv->lock, flags);
2227 
2228 	now = jiffies;
2229 	end = now + HOST_COMPLETE_TIMEOUT;
2230 again:
2231 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2232 					      !(priv->
2233 						status & STATUS_HCMD_ACTIVE),
2234 					      end - now);
2235 	if (rc < 0) {
2236 		now = jiffies;
2237 		if (time_before(now, end))
2238 			goto again;
2239 		rc = 0;
2240 	}
2241 
2242 	if (rc == 0) {
2243 		spin_lock_irqsave(&priv->lock, flags);
2244 		if (priv->status & STATUS_HCMD_ACTIVE) {
2245 			IPW_ERROR("Failed to send %s: Command timed out.\n",
2246 				  get_cmd_string(cmd->cmd));
2247 			priv->status &= ~STATUS_HCMD_ACTIVE;
2248 			spin_unlock_irqrestore(&priv->lock, flags);
2249 			rc = -EIO;
2250 			goto exit;
2251 		}
2252 		spin_unlock_irqrestore(&priv->lock, flags);
2253 	} else
2254 		rc = 0;
2255 
2256 	if (priv->status & STATUS_RF_KILL_HW) {
2257 		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2258 			  get_cmd_string(cmd->cmd));
2259 		rc = -EIO;
2260 		goto exit;
2261 	}
2262 
2263       exit:
2264 	if (priv->cmdlog) {
2265 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2266 		priv->cmdlog_pos %= priv->cmdlog_len;
2267 	}
2268 	return rc;
2269 }
2270 
2271 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2272 {
2273 	struct host_cmd cmd = {
2274 		.cmd = command,
2275 	};
2276 
2277 	return __ipw_send_cmd(priv, &cmd);
2278 }
2279 
2280 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2281 			    void *data)
2282 {
2283 	struct host_cmd cmd = {
2284 		.cmd = command,
2285 		.len = len,
2286 		.param = data,
2287 	};
2288 
2289 	return __ipw_send_cmd(priv, &cmd);
2290 }
2291 
2292 static int ipw_send_host_complete(struct ipw_priv *priv)
2293 {
2294 	if (!priv) {
2295 		IPW_ERROR("Invalid args\n");
2296 		return -1;
2297 	}
2298 
2299 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2300 }
2301 
2302 static int ipw_send_system_config(struct ipw_priv *priv)
2303 {
2304 	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2305 				sizeof(priv->sys_config),
2306 				&priv->sys_config);
2307 }
2308 
2309 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2310 {
2311 	if (!priv || !ssid) {
2312 		IPW_ERROR("Invalid args\n");
2313 		return -1;
2314 	}
2315 
2316 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2317 				ssid);
2318 }
2319 
2320 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2321 {
2322 	if (!priv || !mac) {
2323 		IPW_ERROR("Invalid args\n");
2324 		return -1;
2325 	}
2326 
2327 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2328 		       priv->net_dev->name, mac);
2329 
2330 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2331 }
2332 
2333 static void ipw_adapter_restart(void *adapter)
2334 {
2335 	struct ipw_priv *priv = adapter;
2336 
2337 	if (priv->status & STATUS_RF_KILL_MASK)
2338 		return;
2339 
2340 	ipw_down(priv);
2341 
2342 	if (priv->assoc_network &&
2343 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2344 		ipw_remove_current_network(priv);
2345 
2346 	if (ipw_up(priv)) {
2347 		IPW_ERROR("Failed to up device\n");
2348 		return;
2349 	}
2350 }
2351 
2352 static void ipw_bg_adapter_restart(struct work_struct *work)
2353 {
2354 	struct ipw_priv *priv =
2355 		container_of(work, struct ipw_priv, adapter_restart);
2356 	mutex_lock(&priv->mutex);
2357 	ipw_adapter_restart(priv);
2358 	mutex_unlock(&priv->mutex);
2359 }
2360 
2361 static void ipw_abort_scan(struct ipw_priv *priv);
2362 
2363 #define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2364 
2365 static void ipw_scan_check(void *data)
2366 {
2367 	struct ipw_priv *priv = data;
2368 
2369 	if (priv->status & STATUS_SCAN_ABORTING) {
2370 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2371 			       "adapter after (%dms).\n",
2372 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2373 		schedule_work(&priv->adapter_restart);
2374 	} else if (priv->status & STATUS_SCANNING) {
2375 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2376 			       "after (%dms).\n",
2377 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2378 		ipw_abort_scan(priv);
2379 		schedule_delayed_work(&priv->scan_check, HZ);
2380 	}
2381 }
2382 
2383 static void ipw_bg_scan_check(struct work_struct *work)
2384 {
2385 	struct ipw_priv *priv =
2386 		container_of(work, struct ipw_priv, scan_check.work);
2387 	mutex_lock(&priv->mutex);
2388 	ipw_scan_check(priv);
2389 	mutex_unlock(&priv->mutex);
2390 }
2391 
2392 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2393 				     struct ipw_scan_request_ext *request)
2394 {
2395 	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2396 				sizeof(*request), request);
2397 }
2398 
2399 static int ipw_send_scan_abort(struct ipw_priv *priv)
2400 {
2401 	if (!priv) {
2402 		IPW_ERROR("Invalid args\n");
2403 		return -1;
2404 	}
2405 
2406 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2407 }
2408 
2409 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2410 {
2411 	struct ipw_sensitivity_calib calib = {
2412 		.beacon_rssi_raw = cpu_to_le16(sens),
2413 	};
2414 
2415 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2416 				&calib);
2417 }
2418 
2419 static int ipw_send_associate(struct ipw_priv *priv,
2420 			      struct ipw_associate *associate)
2421 {
2422 	if (!priv || !associate) {
2423 		IPW_ERROR("Invalid args\n");
2424 		return -1;
2425 	}
2426 
2427 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2428 				associate);
2429 }
2430 
2431 static int ipw_send_supported_rates(struct ipw_priv *priv,
2432 				    struct ipw_supported_rates *rates)
2433 {
2434 	if (!priv || !rates) {
2435 		IPW_ERROR("Invalid args\n");
2436 		return -1;
2437 	}
2438 
2439 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2440 				rates);
2441 }
2442 
2443 static int ipw_set_random_seed(struct ipw_priv *priv)
2444 {
2445 	u32 val;
2446 
2447 	if (!priv) {
2448 		IPW_ERROR("Invalid args\n");
2449 		return -1;
2450 	}
2451 
2452 	get_random_bytes(&val, sizeof(val));
2453 
2454 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2455 }
2456 
2457 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2458 {
2459 	__le32 v = cpu_to_le32(phy_off);
2460 	if (!priv) {
2461 		IPW_ERROR("Invalid args\n");
2462 		return -1;
2463 	}
2464 
2465 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2466 }
2467 
2468 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2469 {
2470 	if (!priv || !power) {
2471 		IPW_ERROR("Invalid args\n");
2472 		return -1;
2473 	}
2474 
2475 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2476 }
2477 
2478 static int ipw_set_tx_power(struct ipw_priv *priv)
2479 {
2480 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2481 	struct ipw_tx_power tx_power;
2482 	s8 max_power;
2483 	int i;
2484 
2485 	memset(&tx_power, 0, sizeof(tx_power));
2486 
2487 	/* configure device for 'G' band */
2488 	tx_power.ieee_mode = IPW_G_MODE;
2489 	tx_power.num_channels = geo->bg_channels;
2490 	for (i = 0; i < geo->bg_channels; i++) {
2491 		max_power = geo->bg[i].max_power;
2492 		tx_power.channels_tx_power[i].channel_number =
2493 		    geo->bg[i].channel;
2494 		tx_power.channels_tx_power[i].tx_power = max_power ?
2495 		    min(max_power, priv->tx_power) : priv->tx_power;
2496 	}
2497 	if (ipw_send_tx_power(priv, &tx_power))
2498 		return -EIO;
2499 
2500 	/* configure device to also handle 'B' band */
2501 	tx_power.ieee_mode = IPW_B_MODE;
2502 	if (ipw_send_tx_power(priv, &tx_power))
2503 		return -EIO;
2504 
2505 	/* configure device to also handle 'A' band */
2506 	if (priv->ieee->abg_true) {
2507 		tx_power.ieee_mode = IPW_A_MODE;
2508 		tx_power.num_channels = geo->a_channels;
2509 		for (i = 0; i < tx_power.num_channels; i++) {
2510 			max_power = geo->a[i].max_power;
2511 			tx_power.channels_tx_power[i].channel_number =
2512 			    geo->a[i].channel;
2513 			tx_power.channels_tx_power[i].tx_power = max_power ?
2514 			    min(max_power, priv->tx_power) : priv->tx_power;
2515 		}
2516 		if (ipw_send_tx_power(priv, &tx_power))
2517 			return -EIO;
2518 	}
2519 	return 0;
2520 }
2521 
2522 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2523 {
2524 	struct ipw_rts_threshold rts_threshold = {
2525 		.rts_threshold = cpu_to_le16(rts),
2526 	};
2527 
2528 	if (!priv) {
2529 		IPW_ERROR("Invalid args\n");
2530 		return -1;
2531 	}
2532 
2533 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2534 				sizeof(rts_threshold), &rts_threshold);
2535 }
2536 
2537 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2538 {
2539 	struct ipw_frag_threshold frag_threshold = {
2540 		.frag_threshold = cpu_to_le16(frag),
2541 	};
2542 
2543 	if (!priv) {
2544 		IPW_ERROR("Invalid args\n");
2545 		return -1;
2546 	}
2547 
2548 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2549 				sizeof(frag_threshold), &frag_threshold);
2550 }
2551 
2552 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2553 {
2554 	__le32 param;
2555 
2556 	if (!priv) {
2557 		IPW_ERROR("Invalid args\n");
2558 		return -1;
2559 	}
2560 
2561 	/* If on battery, set to 3, if AC set to CAM, else user
2562 	 * level */
2563 	switch (mode) {
2564 	case IPW_POWER_BATTERY:
2565 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2566 		break;
2567 	case IPW_POWER_AC:
2568 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2569 		break;
2570 	default:
2571 		param = cpu_to_le32(mode);
2572 		break;
2573 	}
2574 
2575 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2576 				&param);
2577 }
2578 
2579 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2580 {
2581 	struct ipw_retry_limit retry_limit = {
2582 		.short_retry_limit = slimit,
2583 		.long_retry_limit = llimit
2584 	};
2585 
2586 	if (!priv) {
2587 		IPW_ERROR("Invalid args\n");
2588 		return -1;
2589 	}
2590 
2591 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2592 				&retry_limit);
2593 }
2594 
2595 /*
2596  * The IPW device contains a Microwire compatible EEPROM that stores
2597  * various data like the MAC address.  Usually the firmware has exclusive
2598  * access to the eeprom, but during device initialization (before the
2599  * device driver has sent the HostComplete command to the firmware) the
2600  * device driver has read access to the EEPROM by way of indirect addressing
2601  * through a couple of memory mapped registers.
2602  *
2603  * The following is a simplified implementation for pulling data out of the
2604  * the eeprom, along with some helper functions to find information in
2605  * the per device private data's copy of the eeprom.
2606  *
2607  * NOTE: To better understand how these functions work (i.e what is a chip
2608  *       select and why do have to keep driving the eeprom clock?), read
2609  *       just about any data sheet for a Microwire compatible EEPROM.
2610  */
2611 
2612 /* write a 32 bit value into the indirect accessor register */
2613 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2614 {
2615 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2616 
2617 	/* the eeprom requires some time to complete the operation */
2618 	udelay(p->eeprom_delay);
2619 }
2620 
2621 /* perform a chip select operation */
2622 static void eeprom_cs(struct ipw_priv *priv)
2623 {
2624 	eeprom_write_reg(priv, 0);
2625 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2626 	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2627 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2628 }
2629 
2630 /* perform a chip select operation */
2631 static void eeprom_disable_cs(struct ipw_priv *priv)
2632 {
2633 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2634 	eeprom_write_reg(priv, 0);
2635 	eeprom_write_reg(priv, EEPROM_BIT_SK);
2636 }
2637 
2638 /* push a single bit down to the eeprom */
2639 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2640 {
2641 	int d = (bit ? EEPROM_BIT_DI : 0);
2642 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2643 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2644 }
2645 
2646 /* push an opcode followed by an address down to the eeprom */
2647 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2648 {
2649 	int i;
2650 
2651 	eeprom_cs(priv);
2652 	eeprom_write_bit(priv, 1);
2653 	eeprom_write_bit(priv, op & 2);
2654 	eeprom_write_bit(priv, op & 1);
2655 	for (i = 7; i >= 0; i--) {
2656 		eeprom_write_bit(priv, addr & (1 << i));
2657 	}
2658 }
2659 
2660 /* pull 16 bits off the eeprom, one bit at a time */
2661 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2662 {
2663 	int i;
2664 	u16 r = 0;
2665 
2666 	/* Send READ Opcode */
2667 	eeprom_op(priv, EEPROM_CMD_READ, addr);
2668 
2669 	/* Send dummy bit */
2670 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2671 
2672 	/* Read the byte off the eeprom one bit at a time */
2673 	for (i = 0; i < 16; i++) {
2674 		u32 data = 0;
2675 		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2676 		eeprom_write_reg(priv, EEPROM_BIT_CS);
2677 		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2678 		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2679 	}
2680 
2681 	/* Send another dummy bit */
2682 	eeprom_write_reg(priv, 0);
2683 	eeprom_disable_cs(priv);
2684 
2685 	return r;
2686 }
2687 
2688 /* helper function for pulling the mac address out of the private */
2689 /* data's copy of the eeprom data                                 */
2690 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2691 {
2692 	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2693 }
2694 
2695 static void ipw_read_eeprom(struct ipw_priv *priv)
2696 {
2697 	int i;
2698 	__le16 *eeprom = (__le16 *) priv->eeprom;
2699 
2700 	IPW_DEBUG_TRACE(">>\n");
2701 
2702 	/* read entire contents of eeprom into private buffer */
2703 	for (i = 0; i < 128; i++)
2704 		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2705 
2706 	IPW_DEBUG_TRACE("<<\n");
2707 }
2708 
2709 /*
2710  * Either the device driver (i.e. the host) or the firmware can
2711  * load eeprom data into the designated region in SRAM.  If neither
2712  * happens then the FW will shutdown with a fatal error.
2713  *
2714  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2715  * bit needs region of shared SRAM needs to be non-zero.
2716  */
2717 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2718 {
2719 	int i;
2720 
2721 	IPW_DEBUG_TRACE(">>\n");
2722 
2723 	/*
2724 	   If the data looks correct, then copy it to our private
2725 	   copy.  Otherwise let the firmware know to perform the operation
2726 	   on its own.
2727 	 */
2728 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2729 		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2730 
2731 		/* write the eeprom data to sram */
2732 		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2733 			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2734 
2735 		/* Do not load eeprom data on fatal error or suspend */
2736 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2737 	} else {
2738 		IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2739 
2740 		/* Load eeprom data on fatal error or suspend */
2741 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2742 	}
2743 
2744 	IPW_DEBUG_TRACE("<<\n");
2745 }
2746 
2747 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2748 {
2749 	count >>= 2;
2750 	if (!count)
2751 		return;
2752 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2753 	while (count--)
2754 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2755 }
2756 
2757 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2758 {
2759 	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2760 			CB_NUMBER_OF_ELEMENTS_SMALL *
2761 			sizeof(struct command_block));
2762 }
2763 
2764 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2765 {				/* start dma engine but no transfers yet */
2766 
2767 	IPW_DEBUG_FW(">> :\n");
2768 
2769 	/* Start the dma */
2770 	ipw_fw_dma_reset_command_blocks(priv);
2771 
2772 	/* Write CB base address */
2773 	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2774 
2775 	IPW_DEBUG_FW("<< :\n");
2776 	return 0;
2777 }
2778 
2779 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2780 {
2781 	u32 control = 0;
2782 
2783 	IPW_DEBUG_FW(">> :\n");
2784 
2785 	/* set the Stop and Abort bit */
2786 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2787 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2788 	priv->sram_desc.last_cb_index = 0;
2789 
2790 	IPW_DEBUG_FW("<<\n");
2791 }
2792 
2793 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2794 					  struct command_block *cb)
2795 {
2796 	u32 address =
2797 	    IPW_SHARED_SRAM_DMA_CONTROL +
2798 	    (sizeof(struct command_block) * index);
2799 	IPW_DEBUG_FW(">> :\n");
2800 
2801 	ipw_write_indirect(priv, address, (u8 *) cb,
2802 			   (int)sizeof(struct command_block));
2803 
2804 	IPW_DEBUG_FW("<< :\n");
2805 	return 0;
2806 
2807 }
2808 
2809 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2810 {
2811 	u32 control = 0;
2812 	u32 index = 0;
2813 
2814 	IPW_DEBUG_FW(">> :\n");
2815 
2816 	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2817 		ipw_fw_dma_write_command_block(priv, index,
2818 					       &priv->sram_desc.cb_list[index]);
2819 
2820 	/* Enable the DMA in the CSR register */
2821 	ipw_clear_bit(priv, IPW_RESET_REG,
2822 		      IPW_RESET_REG_MASTER_DISABLED |
2823 		      IPW_RESET_REG_STOP_MASTER);
2824 
2825 	/* Set the Start bit. */
2826 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2827 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2828 
2829 	IPW_DEBUG_FW("<< :\n");
2830 	return 0;
2831 }
2832 
2833 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2834 {
2835 	u32 address;
2836 	u32 register_value = 0;
2837 	u32 cb_fields_address = 0;
2838 
2839 	IPW_DEBUG_FW(">> :\n");
2840 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2841 	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2842 
2843 	/* Read the DMA Controlor register */
2844 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2845 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2846 
2847 	/* Print the CB values */
2848 	cb_fields_address = address;
2849 	register_value = ipw_read_reg32(priv, cb_fields_address);
2850 	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2851 
2852 	cb_fields_address += sizeof(u32);
2853 	register_value = ipw_read_reg32(priv, cb_fields_address);
2854 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2855 
2856 	cb_fields_address += sizeof(u32);
2857 	register_value = ipw_read_reg32(priv, cb_fields_address);
2858 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2859 			  register_value);
2860 
2861 	cb_fields_address += sizeof(u32);
2862 	register_value = ipw_read_reg32(priv, cb_fields_address);
2863 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2864 
2865 	IPW_DEBUG_FW(">> :\n");
2866 }
2867 
2868 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2869 {
2870 	u32 current_cb_address = 0;
2871 	u32 current_cb_index = 0;
2872 
2873 	IPW_DEBUG_FW("<< :\n");
2874 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2875 
2876 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2877 	    sizeof(struct command_block);
2878 
2879 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2880 			  current_cb_index, current_cb_address);
2881 
2882 	IPW_DEBUG_FW(">> :\n");
2883 	return current_cb_index;
2884 
2885 }
2886 
2887 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2888 					u32 src_address,
2889 					u32 dest_address,
2890 					u32 length,
2891 					int interrupt_enabled, int is_last)
2892 {
2893 
2894 	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2895 	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2896 	    CB_DEST_SIZE_LONG;
2897 	struct command_block *cb;
2898 	u32 last_cb_element = 0;
2899 
2900 	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2901 			  src_address, dest_address, length);
2902 
2903 	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2904 		return -1;
2905 
2906 	last_cb_element = priv->sram_desc.last_cb_index;
2907 	cb = &priv->sram_desc.cb_list[last_cb_element];
2908 	priv->sram_desc.last_cb_index++;
2909 
2910 	/* Calculate the new CB control word */
2911 	if (interrupt_enabled)
2912 		control |= CB_INT_ENABLED;
2913 
2914 	if (is_last)
2915 		control |= CB_LAST_VALID;
2916 
2917 	control |= length;
2918 
2919 	/* Calculate the CB Element's checksum value */
2920 	cb->status = control ^ src_address ^ dest_address;
2921 
2922 	/* Copy the Source and Destination addresses */
2923 	cb->dest_addr = dest_address;
2924 	cb->source_addr = src_address;
2925 
2926 	/* Copy the Control Word last */
2927 	cb->control = control;
2928 
2929 	return 0;
2930 }
2931 
2932 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2933 				 int nr, u32 dest_address, u32 len)
2934 {
2935 	int ret, i;
2936 	u32 size;
2937 
2938 	IPW_DEBUG_FW(">>\n");
2939 	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2940 			  nr, dest_address, len);
2941 
2942 	for (i = 0; i < nr; i++) {
2943 		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2944 		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2945 						   dest_address +
2946 						   i * CB_MAX_LENGTH, size,
2947 						   0, 0);
2948 		if (ret) {
2949 			IPW_DEBUG_FW_INFO(": Failed\n");
2950 			return -1;
2951 		} else
2952 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2953 	}
2954 
2955 	IPW_DEBUG_FW("<<\n");
2956 	return 0;
2957 }
2958 
2959 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2960 {
2961 	u32 current_index = 0, previous_index;
2962 	u32 watchdog = 0;
2963 
2964 	IPW_DEBUG_FW(">> :\n");
2965 
2966 	current_index = ipw_fw_dma_command_block_index(priv);
2967 	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2968 			  (int)priv->sram_desc.last_cb_index);
2969 
2970 	while (current_index < priv->sram_desc.last_cb_index) {
2971 		udelay(50);
2972 		previous_index = current_index;
2973 		current_index = ipw_fw_dma_command_block_index(priv);
2974 
2975 		if (previous_index < current_index) {
2976 			watchdog = 0;
2977 			continue;
2978 		}
2979 		if (++watchdog > 400) {
2980 			IPW_DEBUG_FW_INFO("Timeout\n");
2981 			ipw_fw_dma_dump_command_block(priv);
2982 			ipw_fw_dma_abort(priv);
2983 			return -1;
2984 		}
2985 	}
2986 
2987 	ipw_fw_dma_abort(priv);
2988 
2989 	/*Disable the DMA in the CSR register */
2990 	ipw_set_bit(priv, IPW_RESET_REG,
2991 		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2992 
2993 	IPW_DEBUG_FW("<< dmaWaitSync\n");
2994 	return 0;
2995 }
2996 
2997 static void ipw_remove_current_network(struct ipw_priv *priv)
2998 {
2999 	struct list_head *element, *safe;
3000 	struct libipw_network *network = NULL;
3001 	unsigned long flags;
3002 
3003 	spin_lock_irqsave(&priv->ieee->lock, flags);
3004 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
3005 		network = list_entry(element, struct libipw_network, list);
3006 		if (ether_addr_equal(network->bssid, priv->bssid)) {
3007 			list_del(element);
3008 			list_add_tail(&network->list,
3009 				      &priv->ieee->network_free_list);
3010 		}
3011 	}
3012 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
3013 }
3014 
3015 /**
3016  * Check that card is still alive.
3017  * Reads debug register from domain0.
3018  * If card is present, pre-defined value should
3019  * be found there.
3020  *
3021  * @param priv
3022  * @return 1 if card is present, 0 otherwise
3023  */
3024 static inline int ipw_alive(struct ipw_priv *priv)
3025 {
3026 	return ipw_read32(priv, 0x90) == 0xd55555d5;
3027 }
3028 
3029 /* timeout in msec, attempted in 10-msec quanta */
3030 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3031 			       int timeout)
3032 {
3033 	int i = 0;
3034 
3035 	do {
3036 		if ((ipw_read32(priv, addr) & mask) == mask)
3037 			return i;
3038 		mdelay(10);
3039 		i += 10;
3040 	} while (i < timeout);
3041 
3042 	return -ETIME;
3043 }
3044 
3045 /* These functions load the firmware and micro code for the operation of
3046  * the ipw hardware.  It assumes the buffer has all the bits for the
3047  * image and the caller is handling the memory allocation and clean up.
3048  */
3049 
3050 static int ipw_stop_master(struct ipw_priv *priv)
3051 {
3052 	int rc;
3053 
3054 	IPW_DEBUG_TRACE(">>\n");
3055 	/* stop master. typical delay - 0 */
3056 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3057 
3058 	/* timeout is in msec, polled in 10-msec quanta */
3059 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3060 			  IPW_RESET_REG_MASTER_DISABLED, 100);
3061 	if (rc < 0) {
3062 		IPW_ERROR("wait for stop master failed after 100ms\n");
3063 		return -1;
3064 	}
3065 
3066 	IPW_DEBUG_INFO("stop master %dms\n", rc);
3067 
3068 	return rc;
3069 }
3070 
3071 static void ipw_arc_release(struct ipw_priv *priv)
3072 {
3073 	IPW_DEBUG_TRACE(">>\n");
3074 	mdelay(5);
3075 
3076 	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3077 
3078 	/* no one knows timing, for safety add some delay */
3079 	mdelay(5);
3080 }
3081 
3082 struct fw_chunk {
3083 	__le32 address;
3084 	__le32 length;
3085 };
3086 
3087 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3088 {
3089 	int rc = 0, i, addr;
3090 	u8 cr = 0;
3091 	__le16 *image;
3092 
3093 	image = (__le16 *) data;
3094 
3095 	IPW_DEBUG_TRACE(">>\n");
3096 
3097 	rc = ipw_stop_master(priv);
3098 
3099 	if (rc < 0)
3100 		return rc;
3101 
3102 	for (addr = IPW_SHARED_LOWER_BOUND;
3103 	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3104 		ipw_write32(priv, addr, 0);
3105 	}
3106 
3107 	/* no ucode (yet) */
3108 	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3109 	/* destroy DMA queues */
3110 	/* reset sequence */
3111 
3112 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3113 	ipw_arc_release(priv);
3114 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3115 	mdelay(1);
3116 
3117 	/* reset PHY */
3118 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3119 	mdelay(1);
3120 
3121 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3122 	mdelay(1);
3123 
3124 	/* enable ucode store */
3125 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3126 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3127 	mdelay(1);
3128 
3129 	/* write ucode */
3130 	/**
3131 	 * @bug
3132 	 * Do NOT set indirect address register once and then
3133 	 * store data to indirect data register in the loop.
3134 	 * It seems very reasonable, but in this case DINO do not
3135 	 * accept ucode. It is essential to set address each time.
3136 	 */
3137 	/* load new ipw uCode */
3138 	for (i = 0; i < len / 2; i++)
3139 		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3140 				le16_to_cpu(image[i]));
3141 
3142 	/* enable DINO */
3143 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3144 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3145 
3146 	/* this is where the igx / win driver deveates from the VAP driver. */
3147 
3148 	/* wait for alive response */
3149 	for (i = 0; i < 100; i++) {
3150 		/* poll for incoming data */
3151 		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3152 		if (cr & DINO_RXFIFO_DATA)
3153 			break;
3154 		mdelay(1);
3155 	}
3156 
3157 	if (cr & DINO_RXFIFO_DATA) {
3158 		/* alive_command_responce size is NOT multiple of 4 */
3159 		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3160 
3161 		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3162 			response_buffer[i] =
3163 			    cpu_to_le32(ipw_read_reg32(priv,
3164 						       IPW_BASEBAND_RX_FIFO_READ));
3165 		memcpy(&priv->dino_alive, response_buffer,
3166 		       sizeof(priv->dino_alive));
3167 		if (priv->dino_alive.alive_command == 1
3168 		    && priv->dino_alive.ucode_valid == 1) {
3169 			rc = 0;
3170 			IPW_DEBUG_INFO
3171 			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3172 			     "of %02d/%02d/%02d %02d:%02d\n",
3173 			     priv->dino_alive.software_revision,
3174 			     priv->dino_alive.software_revision,
3175 			     priv->dino_alive.device_identifier,
3176 			     priv->dino_alive.device_identifier,
3177 			     priv->dino_alive.time_stamp[0],
3178 			     priv->dino_alive.time_stamp[1],
3179 			     priv->dino_alive.time_stamp[2],
3180 			     priv->dino_alive.time_stamp[3],
3181 			     priv->dino_alive.time_stamp[4]);
3182 		} else {
3183 			IPW_DEBUG_INFO("Microcode is not alive\n");
3184 			rc = -EINVAL;
3185 		}
3186 	} else {
3187 		IPW_DEBUG_INFO("No alive response from DINO\n");
3188 		rc = -ETIME;
3189 	}
3190 
3191 	/* disable DINO, otherwise for some reason
3192 	   firmware have problem getting alive resp. */
3193 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3194 
3195 	return rc;
3196 }
3197 
3198 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3199 {
3200 	int ret = -1;
3201 	int offset = 0;
3202 	struct fw_chunk *chunk;
3203 	int total_nr = 0;
3204 	int i;
3205 	struct dma_pool *pool;
3206 	void **virts;
3207 	dma_addr_t *phys;
3208 
3209 	IPW_DEBUG_TRACE("<< :\n");
3210 
3211 	virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
3212 			      GFP_KERNEL);
3213 	if (!virts)
3214 		return -ENOMEM;
3215 
3216 	phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
3217 			     GFP_KERNEL);
3218 	if (!phys) {
3219 		kfree(virts);
3220 		return -ENOMEM;
3221 	}
3222 	pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
3223 			       0);
3224 	if (!pool) {
3225 		IPW_ERROR("dma_pool_create failed\n");
3226 		kfree(phys);
3227 		kfree(virts);
3228 		return -ENOMEM;
3229 	}
3230 
3231 	/* Start the Dma */
3232 	ret = ipw_fw_dma_enable(priv);
3233 
3234 	/* the DMA is already ready this would be a bug. */
3235 	BUG_ON(priv->sram_desc.last_cb_index > 0);
3236 
3237 	do {
3238 		u32 chunk_len;
3239 		u8 *start;
3240 		int size;
3241 		int nr = 0;
3242 
3243 		chunk = (struct fw_chunk *)(data + offset);
3244 		offset += sizeof(struct fw_chunk);
3245 		chunk_len = le32_to_cpu(chunk->length);
3246 		start = data + offset;
3247 
3248 		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3249 		for (i = 0; i < nr; i++) {
3250 			virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
3251 							 &phys[total_nr]);
3252 			if (!virts[total_nr]) {
3253 				ret = -ENOMEM;
3254 				goto out;
3255 			}
3256 			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3257 				     CB_MAX_LENGTH);
3258 			memcpy(virts[total_nr], start, size);
3259 			start += size;
3260 			total_nr++;
3261 			/* We don't support fw chunk larger than 64*8K */
3262 			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3263 		}
3264 
3265 		/* build DMA packet and queue up for sending */
3266 		/* dma to chunk->address, the chunk->length bytes from data +
3267 		 * offeset*/
3268 		/* Dma loading */
3269 		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3270 					    nr, le32_to_cpu(chunk->address),
3271 					    chunk_len);
3272 		if (ret) {
3273 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3274 			goto out;
3275 		}
3276 
3277 		offset += chunk_len;
3278 	} while (offset < len);
3279 
3280 	/* Run the DMA and wait for the answer */
3281 	ret = ipw_fw_dma_kick(priv);
3282 	if (ret) {
3283 		IPW_ERROR("dmaKick Failed\n");
3284 		goto out;
3285 	}
3286 
3287 	ret = ipw_fw_dma_wait(priv);
3288 	if (ret) {
3289 		IPW_ERROR("dmaWaitSync Failed\n");
3290 		goto out;
3291 	}
3292  out:
3293 	for (i = 0; i < total_nr; i++)
3294 		dma_pool_free(pool, virts[i], phys[i]);
3295 
3296 	dma_pool_destroy(pool);
3297 	kfree(phys);
3298 	kfree(virts);
3299 
3300 	return ret;
3301 }
3302 
3303 /* stop nic */
3304 static int ipw_stop_nic(struct ipw_priv *priv)
3305 {
3306 	int rc = 0;
3307 
3308 	/* stop */
3309 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3310 
3311 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3312 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3313 	if (rc < 0) {
3314 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3315 		return rc;
3316 	}
3317 
3318 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3319 
3320 	return rc;
3321 }
3322 
3323 static void ipw_start_nic(struct ipw_priv *priv)
3324 {
3325 	IPW_DEBUG_TRACE(">>\n");
3326 
3327 	/* prvHwStartNic  release ARC */
3328 	ipw_clear_bit(priv, IPW_RESET_REG,
3329 		      IPW_RESET_REG_MASTER_DISABLED |
3330 		      IPW_RESET_REG_STOP_MASTER |
3331 		      CBD_RESET_REG_PRINCETON_RESET);
3332 
3333 	/* enable power management */
3334 	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3335 		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3336 
3337 	IPW_DEBUG_TRACE("<<\n");
3338 }
3339 
3340 static int ipw_init_nic(struct ipw_priv *priv)
3341 {
3342 	int rc;
3343 
3344 	IPW_DEBUG_TRACE(">>\n");
3345 	/* reset */
3346 	/*prvHwInitNic */
3347 	/* set "initialization complete" bit to move adapter to D0 state */
3348 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3349 
3350 	/* low-level PLL activation */
3351 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3352 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3353 
3354 	/* wait for clock stabilization */
3355 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3356 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3357 	if (rc < 0)
3358 		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3359 
3360 	/* assert SW reset */
3361 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3362 
3363 	udelay(10);
3364 
3365 	/* set "initialization complete" bit to move adapter to D0 state */
3366 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3367 
3368 	IPW_DEBUG_TRACE(">>\n");
3369 	return 0;
3370 }
3371 
3372 /* Call this function from process context, it will sleep in request_firmware.
3373  * Probe is an ok place to call this from.
3374  */
3375 static int ipw_reset_nic(struct ipw_priv *priv)
3376 {
3377 	int rc = 0;
3378 	unsigned long flags;
3379 
3380 	IPW_DEBUG_TRACE(">>\n");
3381 
3382 	rc = ipw_init_nic(priv);
3383 
3384 	spin_lock_irqsave(&priv->lock, flags);
3385 	/* Clear the 'host command active' bit... */
3386 	priv->status &= ~STATUS_HCMD_ACTIVE;
3387 	wake_up_interruptible(&priv->wait_command_queue);
3388 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3389 	wake_up_interruptible(&priv->wait_state);
3390 	spin_unlock_irqrestore(&priv->lock, flags);
3391 
3392 	IPW_DEBUG_TRACE("<<\n");
3393 	return rc;
3394 }
3395 
3396 
3397 struct ipw_fw {
3398 	__le32 ver;
3399 	__le32 boot_size;
3400 	__le32 ucode_size;
3401 	__le32 fw_size;
3402 	u8 data[0];
3403 };
3404 
3405 static int ipw_get_fw(struct ipw_priv *priv,
3406 		      const struct firmware **raw, const char *name)
3407 {
3408 	struct ipw_fw *fw;
3409 	int rc;
3410 
3411 	/* ask firmware_class module to get the boot firmware off disk */
3412 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3413 	if (rc < 0) {
3414 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3415 		return rc;
3416 	}
3417 
3418 	if ((*raw)->size < sizeof(*fw)) {
3419 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3420 		return -EINVAL;
3421 	}
3422 
3423 	fw = (void *)(*raw)->data;
3424 
3425 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3426 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3427 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3428 			  name, (*raw)->size);
3429 		return -EINVAL;
3430 	}
3431 
3432 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3433 		       name,
3434 		       le32_to_cpu(fw->ver) >> 16,
3435 		       le32_to_cpu(fw->ver) & 0xff,
3436 		       (*raw)->size - sizeof(*fw));
3437 	return 0;
3438 }
3439 
3440 #define IPW_RX_BUF_SIZE (3000)
3441 
3442 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3443 				      struct ipw_rx_queue *rxq)
3444 {
3445 	unsigned long flags;
3446 	int i;
3447 
3448 	spin_lock_irqsave(&rxq->lock, flags);
3449 
3450 	INIT_LIST_HEAD(&rxq->rx_free);
3451 	INIT_LIST_HEAD(&rxq->rx_used);
3452 
3453 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3454 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3455 		/* In the reset function, these buffers may have been allocated
3456 		 * to an SKB, so we need to unmap and free potential storage */
3457 		if (rxq->pool[i].skb != NULL) {
3458 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3459 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3460 			dev_kfree_skb(rxq->pool[i].skb);
3461 			rxq->pool[i].skb = NULL;
3462 		}
3463 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3464 	}
3465 
3466 	/* Set us so that we have processed and used all buffers, but have
3467 	 * not restocked the Rx queue with fresh buffers */
3468 	rxq->read = rxq->write = 0;
3469 	rxq->free_count = 0;
3470 	spin_unlock_irqrestore(&rxq->lock, flags);
3471 }
3472 
3473 #ifdef CONFIG_PM
3474 static int fw_loaded = 0;
3475 static const struct firmware *raw = NULL;
3476 
3477 static void free_firmware(void)
3478 {
3479 	if (fw_loaded) {
3480 		release_firmware(raw);
3481 		raw = NULL;
3482 		fw_loaded = 0;
3483 	}
3484 }
3485 #else
3486 #define free_firmware() do {} while (0)
3487 #endif
3488 
3489 static int ipw_load(struct ipw_priv *priv)
3490 {
3491 #ifndef CONFIG_PM
3492 	const struct firmware *raw = NULL;
3493 #endif
3494 	struct ipw_fw *fw;
3495 	u8 *boot_img, *ucode_img, *fw_img;
3496 	u8 *name = NULL;
3497 	int rc = 0, retries = 3;
3498 
3499 	switch (priv->ieee->iw_mode) {
3500 	case IW_MODE_ADHOC:
3501 		name = "ipw2200-ibss.fw";
3502 		break;
3503 #ifdef CONFIG_IPW2200_MONITOR
3504 	case IW_MODE_MONITOR:
3505 		name = "ipw2200-sniffer.fw";
3506 		break;
3507 #endif
3508 	case IW_MODE_INFRA:
3509 		name = "ipw2200-bss.fw";
3510 		break;
3511 	}
3512 
3513 	if (!name) {
3514 		rc = -EINVAL;
3515 		goto error;
3516 	}
3517 
3518 #ifdef CONFIG_PM
3519 	if (!fw_loaded) {
3520 #endif
3521 		rc = ipw_get_fw(priv, &raw, name);
3522 		if (rc < 0)
3523 			goto error;
3524 #ifdef CONFIG_PM
3525 	}
3526 #endif
3527 
3528 	fw = (void *)raw->data;
3529 	boot_img = &fw->data[0];
3530 	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3531 	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3532 			   le32_to_cpu(fw->ucode_size)];
3533 
3534 	if (!priv->rxq)
3535 		priv->rxq = ipw_rx_queue_alloc(priv);
3536 	else
3537 		ipw_rx_queue_reset(priv, priv->rxq);
3538 	if (!priv->rxq) {
3539 		IPW_ERROR("Unable to initialize Rx queue\n");
3540 		rc = -ENOMEM;
3541 		goto error;
3542 	}
3543 
3544       retry:
3545 	/* Ensure interrupts are disabled */
3546 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3547 	priv->status &= ~STATUS_INT_ENABLED;
3548 
3549 	/* ack pending interrupts */
3550 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3551 
3552 	ipw_stop_nic(priv);
3553 
3554 	rc = ipw_reset_nic(priv);
3555 	if (rc < 0) {
3556 		IPW_ERROR("Unable to reset NIC\n");
3557 		goto error;
3558 	}
3559 
3560 	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3561 			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3562 
3563 	/* DMA the initial boot firmware into the device */
3564 	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3565 	if (rc < 0) {
3566 		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3567 		goto error;
3568 	}
3569 
3570 	/* kick start the device */
3571 	ipw_start_nic(priv);
3572 
3573 	/* wait for the device to finish its initial startup sequence */
3574 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3575 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3576 	if (rc < 0) {
3577 		IPW_ERROR("device failed to boot initial fw image\n");
3578 		goto error;
3579 	}
3580 	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3581 
3582 	/* ack fw init done interrupt */
3583 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3584 
3585 	/* DMA the ucode into the device */
3586 	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3587 	if (rc < 0) {
3588 		IPW_ERROR("Unable to load ucode: %d\n", rc);
3589 		goto error;
3590 	}
3591 
3592 	/* stop nic */
3593 	ipw_stop_nic(priv);
3594 
3595 	/* DMA bss firmware into the device */
3596 	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3597 	if (rc < 0) {
3598 		IPW_ERROR("Unable to load firmware: %d\n", rc);
3599 		goto error;
3600 	}
3601 #ifdef CONFIG_PM
3602 	fw_loaded = 1;
3603 #endif
3604 
3605 	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3606 
3607 	rc = ipw_queue_reset(priv);
3608 	if (rc < 0) {
3609 		IPW_ERROR("Unable to initialize queues\n");
3610 		goto error;
3611 	}
3612 
3613 	/* Ensure interrupts are disabled */
3614 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3615 	/* ack pending interrupts */
3616 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3617 
3618 	/* kick start the device */
3619 	ipw_start_nic(priv);
3620 
3621 	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3622 		if (retries > 0) {
3623 			IPW_WARNING("Parity error.  Retrying init.\n");
3624 			retries--;
3625 			goto retry;
3626 		}
3627 
3628 		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3629 		rc = -EIO;
3630 		goto error;
3631 	}
3632 
3633 	/* wait for the device */
3634 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3635 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3636 	if (rc < 0) {
3637 		IPW_ERROR("device failed to start within 500ms\n");
3638 		goto error;
3639 	}
3640 	IPW_DEBUG_INFO("device response after %dms\n", rc);
3641 
3642 	/* ack fw init done interrupt */
3643 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3644 
3645 	/* read eeprom data */
3646 	priv->eeprom_delay = 1;
3647 	ipw_read_eeprom(priv);
3648 	/* initialize the eeprom region of sram */
3649 	ipw_eeprom_init_sram(priv);
3650 
3651 	/* enable interrupts */
3652 	ipw_enable_interrupts(priv);
3653 
3654 	/* Ensure our queue has valid packets */
3655 	ipw_rx_queue_replenish(priv);
3656 
3657 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3658 
3659 	/* ack pending interrupts */
3660 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3661 
3662 #ifndef CONFIG_PM
3663 	release_firmware(raw);
3664 #endif
3665 	return 0;
3666 
3667       error:
3668 	if (priv->rxq) {
3669 		ipw_rx_queue_free(priv, priv->rxq);
3670 		priv->rxq = NULL;
3671 	}
3672 	ipw_tx_queue_free(priv);
3673 	release_firmware(raw);
3674 #ifdef CONFIG_PM
3675 	fw_loaded = 0;
3676 	raw = NULL;
3677 #endif
3678 
3679 	return rc;
3680 }
3681 
3682 /**
3683  * DMA services
3684  *
3685  * Theory of operation
3686  *
3687  * A queue is a circular buffers with 'Read' and 'Write' pointers.
3688  * 2 empty entries always kept in the buffer to protect from overflow.
3689  *
3690  * For Tx queue, there are low mark and high mark limits. If, after queuing
3691  * the packet for Tx, free space become < low mark, Tx queue stopped. When
3692  * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
3693  * Tx queue resumed.
3694  *
3695  * The IPW operates with six queues, one receive queue in the device's
3696  * sram, one transmit queue for sending commands to the device firmware,
3697  * and four transmit queues for data.
3698  *
3699  * The four transmit queues allow for performing quality of service (qos)
3700  * transmissions as per the 802.11 protocol.  Currently Linux does not
3701  * provide a mechanism to the user for utilizing prioritized queues, so
3702  * we only utilize the first data transmit queue (queue1).
3703  */
3704 
3705 /**
3706  * Driver allocates buffers of this size for Rx
3707  */
3708 
3709 /**
3710  * ipw_rx_queue_space - Return number of free slots available in queue.
3711  */
3712 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3713 {
3714 	int s = q->read - q->write;
3715 	if (s <= 0)
3716 		s += RX_QUEUE_SIZE;
3717 	/* keep some buffer to not confuse full and empty queue */
3718 	s -= 2;
3719 	if (s < 0)
3720 		s = 0;
3721 	return s;
3722 }
3723 
3724 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3725 {
3726 	int s = q->last_used - q->first_empty;
3727 	if (s <= 0)
3728 		s += q->n_bd;
3729 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3730 	if (s < 0)
3731 		s = 0;
3732 	return s;
3733 }
3734 
3735 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3736 {
3737 	return (++index == n_bd) ? 0 : index;
3738 }
3739 
3740 /**
3741  * Initialize common DMA queue structure
3742  *
3743  * @param q                queue to init
3744  * @param count            Number of BD's to allocate. Should be power of 2
3745  * @param read_register    Address for 'read' register
3746  *                         (not offset within BAR, full address)
3747  * @param write_register   Address for 'write' register
3748  *                         (not offset within BAR, full address)
3749  * @param base_register    Address for 'base' register
3750  *                         (not offset within BAR, full address)
3751  * @param size             Address for 'size' register
3752  *                         (not offset within BAR, full address)
3753  */
3754 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3755 			   int count, u32 read, u32 write, u32 base, u32 size)
3756 {
3757 	q->n_bd = count;
3758 
3759 	q->low_mark = q->n_bd / 4;
3760 	if (q->low_mark < 4)
3761 		q->low_mark = 4;
3762 
3763 	q->high_mark = q->n_bd / 8;
3764 	if (q->high_mark < 2)
3765 		q->high_mark = 2;
3766 
3767 	q->first_empty = q->last_used = 0;
3768 	q->reg_r = read;
3769 	q->reg_w = write;
3770 
3771 	ipw_write32(priv, base, q->dma_addr);
3772 	ipw_write32(priv, size, count);
3773 	ipw_write32(priv, read, 0);
3774 	ipw_write32(priv, write, 0);
3775 
3776 	_ipw_read32(priv, 0x90);
3777 }
3778 
3779 static int ipw_queue_tx_init(struct ipw_priv *priv,
3780 			     struct clx2_tx_queue *q,
3781 			     int count, u32 read, u32 write, u32 base, u32 size)
3782 {
3783 	struct pci_dev *dev = priv->pci_dev;
3784 
3785 	q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
3786 	if (!q->txb) {
3787 		IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3788 		return -ENOMEM;
3789 	}
3790 
3791 	q->bd =
3792 	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3793 	if (!q->bd) {
3794 		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3795 			  sizeof(q->bd[0]) * count);
3796 		kfree(q->txb);
3797 		q->txb = NULL;
3798 		return -ENOMEM;
3799 	}
3800 
3801 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3802 	return 0;
3803 }
3804 
3805 /**
3806  * Free one TFD, those at index [txq->q.last_used].
3807  * Do NOT advance any indexes
3808  *
3809  * @param dev
3810  * @param txq
3811  */
3812 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3813 				  struct clx2_tx_queue *txq)
3814 {
3815 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3816 	struct pci_dev *dev = priv->pci_dev;
3817 	int i;
3818 
3819 	/* classify bd */
3820 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3821 		/* nothing to cleanup after for host commands */
3822 		return;
3823 
3824 	/* sanity check */
3825 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3826 		IPW_ERROR("Too many chunks: %i\n",
3827 			  le32_to_cpu(bd->u.data.num_chunks));
3828 		/** @todo issue fatal error, it is quite serious situation */
3829 		return;
3830 	}
3831 
3832 	/* unmap chunks if any */
3833 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3834 		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3835 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3836 				 PCI_DMA_TODEVICE);
3837 		if (txq->txb[txq->q.last_used]) {
3838 			libipw_txb_free(txq->txb[txq->q.last_used]);
3839 			txq->txb[txq->q.last_used] = NULL;
3840 		}
3841 	}
3842 }
3843 
3844 /**
3845  * Deallocate DMA queue.
3846  *
3847  * Empty queue by removing and destroying all BD's.
3848  * Free all buffers.
3849  *
3850  * @param dev
3851  * @param q
3852  */
3853 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3854 {
3855 	struct clx2_queue *q = &txq->q;
3856 	struct pci_dev *dev = priv->pci_dev;
3857 
3858 	if (q->n_bd == 0)
3859 		return;
3860 
3861 	/* first, empty all BD's */
3862 	for (; q->first_empty != q->last_used;
3863 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3864 		ipw_queue_tx_free_tfd(priv, txq);
3865 	}
3866 
3867 	/* free buffers belonging to queue itself */
3868 	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3869 			    q->dma_addr);
3870 	kfree(txq->txb);
3871 
3872 	/* 0 fill whole structure */
3873 	memset(txq, 0, sizeof(*txq));
3874 }
3875 
3876 /**
3877  * Destroy all DMA queues and structures
3878  *
3879  * @param priv
3880  */
3881 static void ipw_tx_queue_free(struct ipw_priv *priv)
3882 {
3883 	/* Tx CMD queue */
3884 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3885 
3886 	/* Tx queues */
3887 	ipw_queue_tx_free(priv, &priv->txq[0]);
3888 	ipw_queue_tx_free(priv, &priv->txq[1]);
3889 	ipw_queue_tx_free(priv, &priv->txq[2]);
3890 	ipw_queue_tx_free(priv, &priv->txq[3]);
3891 }
3892 
3893 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3894 {
3895 	/* First 3 bytes are manufacturer */
3896 	bssid[0] = priv->mac_addr[0];
3897 	bssid[1] = priv->mac_addr[1];
3898 	bssid[2] = priv->mac_addr[2];
3899 
3900 	/* Last bytes are random */
3901 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3902 
3903 	bssid[0] &= 0xfe;	/* clear multicast bit */
3904 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3905 }
3906 
3907 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3908 {
3909 	struct ipw_station_entry entry;
3910 	int i;
3911 
3912 	for (i = 0; i < priv->num_stations; i++) {
3913 		if (ether_addr_equal(priv->stations[i], bssid)) {
3914 			/* Another node is active in network */
3915 			priv->missed_adhoc_beacons = 0;
3916 			if (!(priv->config & CFG_STATIC_CHANNEL))
3917 				/* when other nodes drop out, we drop out */
3918 				priv->config &= ~CFG_ADHOC_PERSIST;
3919 
3920 			return i;
3921 		}
3922 	}
3923 
3924 	if (i == MAX_STATIONS)
3925 		return IPW_INVALID_STATION;
3926 
3927 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3928 
3929 	entry.reserved = 0;
3930 	entry.support_mode = 0;
3931 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3932 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3933 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3934 			 &entry, sizeof(entry));
3935 	priv->num_stations++;
3936 
3937 	return i;
3938 }
3939 
3940 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3941 {
3942 	int i;
3943 
3944 	for (i = 0; i < priv->num_stations; i++)
3945 		if (ether_addr_equal(priv->stations[i], bssid))
3946 			return i;
3947 
3948 	return IPW_INVALID_STATION;
3949 }
3950 
3951 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3952 {
3953 	int err;
3954 
3955 	if (priv->status & STATUS_ASSOCIATING) {
3956 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3957 		schedule_work(&priv->disassociate);
3958 		return;
3959 	}
3960 
3961 	if (!(priv->status & STATUS_ASSOCIATED)) {
3962 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3963 		return;
3964 	}
3965 
3966 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3967 			"on channel %d.\n",
3968 			priv->assoc_request.bssid,
3969 			priv->assoc_request.channel);
3970 
3971 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3972 	priv->status |= STATUS_DISASSOCIATING;
3973 
3974 	if (quiet)
3975 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3976 	else
3977 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3978 
3979 	err = ipw_send_associate(priv, &priv->assoc_request);
3980 	if (err) {
3981 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3982 			     "failed.\n");
3983 		return;
3984 	}
3985 
3986 }
3987 
3988 static int ipw_disassociate(void *data)
3989 {
3990 	struct ipw_priv *priv = data;
3991 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3992 		return 0;
3993 	ipw_send_disassociate(data, 0);
3994 	netif_carrier_off(priv->net_dev);
3995 	return 1;
3996 }
3997 
3998 static void ipw_bg_disassociate(struct work_struct *work)
3999 {
4000 	struct ipw_priv *priv =
4001 		container_of(work, struct ipw_priv, disassociate);
4002 	mutex_lock(&priv->mutex);
4003 	ipw_disassociate(priv);
4004 	mutex_unlock(&priv->mutex);
4005 }
4006 
4007 static void ipw_system_config(struct work_struct *work)
4008 {
4009 	struct ipw_priv *priv =
4010 		container_of(work, struct ipw_priv, system_config);
4011 
4012 #ifdef CONFIG_IPW2200_PROMISCUOUS
4013 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4014 		priv->sys_config.accept_all_data_frames = 1;
4015 		priv->sys_config.accept_non_directed_frames = 1;
4016 		priv->sys_config.accept_all_mgmt_bcpr = 1;
4017 		priv->sys_config.accept_all_mgmt_frames = 1;
4018 	}
4019 #endif
4020 
4021 	ipw_send_system_config(priv);
4022 }
4023 
4024 struct ipw_status_code {
4025 	u16 status;
4026 	const char *reason;
4027 };
4028 
4029 static const struct ipw_status_code ipw_status_codes[] = {
4030 	{0x00, "Successful"},
4031 	{0x01, "Unspecified failure"},
4032 	{0x0A, "Cannot support all requested capabilities in the "
4033 	 "Capability information field"},
4034 	{0x0B, "Reassociation denied due to inability to confirm that "
4035 	 "association exists"},
4036 	{0x0C, "Association denied due to reason outside the scope of this "
4037 	 "standard"},
4038 	{0x0D,
4039 	 "Responding station does not support the specified authentication "
4040 	 "algorithm"},
4041 	{0x0E,
4042 	 "Received an Authentication frame with authentication sequence "
4043 	 "transaction sequence number out of expected sequence"},
4044 	{0x0F, "Authentication rejected because of challenge failure"},
4045 	{0x10, "Authentication rejected due to timeout waiting for next "
4046 	 "frame in sequence"},
4047 	{0x11, "Association denied because AP is unable to handle additional "
4048 	 "associated stations"},
4049 	{0x12,
4050 	 "Association denied due to requesting station not supporting all "
4051 	 "of the datarates in the BSSBasicServiceSet Parameter"},
4052 	{0x13,
4053 	 "Association denied due to requesting station not supporting "
4054 	 "short preamble operation"},
4055 	{0x14,
4056 	 "Association denied due to requesting station not supporting "
4057 	 "PBCC encoding"},
4058 	{0x15,
4059 	 "Association denied due to requesting station not supporting "
4060 	 "channel agility"},
4061 	{0x19,
4062 	 "Association denied due to requesting station not supporting "
4063 	 "short slot operation"},
4064 	{0x1A,
4065 	 "Association denied due to requesting station not supporting "
4066 	 "DSSS-OFDM operation"},
4067 	{0x28, "Invalid Information Element"},
4068 	{0x29, "Group Cipher is not valid"},
4069 	{0x2A, "Pairwise Cipher is not valid"},
4070 	{0x2B, "AKMP is not valid"},
4071 	{0x2C, "Unsupported RSN IE version"},
4072 	{0x2D, "Invalid RSN IE Capabilities"},
4073 	{0x2E, "Cipher suite is rejected per security policy"},
4074 };
4075 
4076 static const char *ipw_get_status_code(u16 status)
4077 {
4078 	int i;
4079 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4080 		if (ipw_status_codes[i].status == (status & 0xff))
4081 			return ipw_status_codes[i].reason;
4082 	return "Unknown status value.";
4083 }
4084 
4085 static inline void average_init(struct average *avg)
4086 {
4087 	memset(avg, 0, sizeof(*avg));
4088 }
4089 
4090 #define DEPTH_RSSI 8
4091 #define DEPTH_NOISE 16
4092 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4093 {
4094 	return ((depth-1)*prev_avg +  val)/depth;
4095 }
4096 
4097 static void average_add(struct average *avg, s16 val)
4098 {
4099 	avg->sum -= avg->entries[avg->pos];
4100 	avg->sum += val;
4101 	avg->entries[avg->pos++] = val;
4102 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4103 		avg->init = 1;
4104 		avg->pos = 0;
4105 	}
4106 }
4107 
4108 static s16 average_value(struct average *avg)
4109 {
4110 	if (!unlikely(avg->init)) {
4111 		if (avg->pos)
4112 			return avg->sum / avg->pos;
4113 		return 0;
4114 	}
4115 
4116 	return avg->sum / AVG_ENTRIES;
4117 }
4118 
4119 static void ipw_reset_stats(struct ipw_priv *priv)
4120 {
4121 	u32 len = sizeof(u32);
4122 
4123 	priv->quality = 0;
4124 
4125 	average_init(&priv->average_missed_beacons);
4126 	priv->exp_avg_rssi = -60;
4127 	priv->exp_avg_noise = -85 + 0x100;
4128 
4129 	priv->last_rate = 0;
4130 	priv->last_missed_beacons = 0;
4131 	priv->last_rx_packets = 0;
4132 	priv->last_tx_packets = 0;
4133 	priv->last_tx_failures = 0;
4134 
4135 	/* Firmware managed, reset only when NIC is restarted, so we have to
4136 	 * normalize on the current value */
4137 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4138 			&priv->last_rx_err, &len);
4139 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4140 			&priv->last_tx_failures, &len);
4141 
4142 	/* Driver managed, reset with each association */
4143 	priv->missed_adhoc_beacons = 0;
4144 	priv->missed_beacons = 0;
4145 	priv->tx_packets = 0;
4146 	priv->rx_packets = 0;
4147 
4148 }
4149 
4150 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4151 {
4152 	u32 i = 0x80000000;
4153 	u32 mask = priv->rates_mask;
4154 	/* If currently associated in B mode, restrict the maximum
4155 	 * rate match to B rates */
4156 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4157 		mask &= LIBIPW_CCK_RATES_MASK;
4158 
4159 	/* TODO: Verify that the rate is supported by the current rates
4160 	 * list. */
4161 
4162 	while (i && !(mask & i))
4163 		i >>= 1;
4164 	switch (i) {
4165 	case LIBIPW_CCK_RATE_1MB_MASK:
4166 		return 1000000;
4167 	case LIBIPW_CCK_RATE_2MB_MASK:
4168 		return 2000000;
4169 	case LIBIPW_CCK_RATE_5MB_MASK:
4170 		return 5500000;
4171 	case LIBIPW_OFDM_RATE_6MB_MASK:
4172 		return 6000000;
4173 	case LIBIPW_OFDM_RATE_9MB_MASK:
4174 		return 9000000;
4175 	case LIBIPW_CCK_RATE_11MB_MASK:
4176 		return 11000000;
4177 	case LIBIPW_OFDM_RATE_12MB_MASK:
4178 		return 12000000;
4179 	case LIBIPW_OFDM_RATE_18MB_MASK:
4180 		return 18000000;
4181 	case LIBIPW_OFDM_RATE_24MB_MASK:
4182 		return 24000000;
4183 	case LIBIPW_OFDM_RATE_36MB_MASK:
4184 		return 36000000;
4185 	case LIBIPW_OFDM_RATE_48MB_MASK:
4186 		return 48000000;
4187 	case LIBIPW_OFDM_RATE_54MB_MASK:
4188 		return 54000000;
4189 	}
4190 
4191 	if (priv->ieee->mode == IEEE_B)
4192 		return 11000000;
4193 	else
4194 		return 54000000;
4195 }
4196 
4197 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4198 {
4199 	u32 rate, len = sizeof(rate);
4200 	int err;
4201 
4202 	if (!(priv->status & STATUS_ASSOCIATED))
4203 		return 0;
4204 
4205 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4206 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4207 				      &len);
4208 		if (err) {
4209 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4210 			return 0;
4211 		}
4212 	} else
4213 		return ipw_get_max_rate(priv);
4214 
4215 	switch (rate) {
4216 	case IPW_TX_RATE_1MB:
4217 		return 1000000;
4218 	case IPW_TX_RATE_2MB:
4219 		return 2000000;
4220 	case IPW_TX_RATE_5MB:
4221 		return 5500000;
4222 	case IPW_TX_RATE_6MB:
4223 		return 6000000;
4224 	case IPW_TX_RATE_9MB:
4225 		return 9000000;
4226 	case IPW_TX_RATE_11MB:
4227 		return 11000000;
4228 	case IPW_TX_RATE_12MB:
4229 		return 12000000;
4230 	case IPW_TX_RATE_18MB:
4231 		return 18000000;
4232 	case IPW_TX_RATE_24MB:
4233 		return 24000000;
4234 	case IPW_TX_RATE_36MB:
4235 		return 36000000;
4236 	case IPW_TX_RATE_48MB:
4237 		return 48000000;
4238 	case IPW_TX_RATE_54MB:
4239 		return 54000000;
4240 	}
4241 
4242 	return 0;
4243 }
4244 
4245 #define IPW_STATS_INTERVAL (2 * HZ)
4246 static void ipw_gather_stats(struct ipw_priv *priv)
4247 {
4248 	u32 rx_err, rx_err_delta, rx_packets_delta;
4249 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4250 	u32 missed_beacons_percent, missed_beacons_delta;
4251 	u32 quality = 0;
4252 	u32 len = sizeof(u32);
4253 	s16 rssi;
4254 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4255 	    rate_quality;
4256 	u32 max_rate;
4257 
4258 	if (!(priv->status & STATUS_ASSOCIATED)) {
4259 		priv->quality = 0;
4260 		return;
4261 	}
4262 
4263 	/* Update the statistics */
4264 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4265 			&priv->missed_beacons, &len);
4266 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4267 	priv->last_missed_beacons = priv->missed_beacons;
4268 	if (priv->assoc_request.beacon_interval) {
4269 		missed_beacons_percent = missed_beacons_delta *
4270 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4271 		    (IPW_STATS_INTERVAL * 10);
4272 	} else {
4273 		missed_beacons_percent = 0;
4274 	}
4275 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4276 
4277 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4278 	rx_err_delta = rx_err - priv->last_rx_err;
4279 	priv->last_rx_err = rx_err;
4280 
4281 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4282 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4283 	priv->last_tx_failures = tx_failures;
4284 
4285 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4286 	priv->last_rx_packets = priv->rx_packets;
4287 
4288 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4289 	priv->last_tx_packets = priv->tx_packets;
4290 
4291 	/* Calculate quality based on the following:
4292 	 *
4293 	 * Missed beacon: 100% = 0, 0% = 70% missed
4294 	 * Rate: 60% = 1Mbs, 100% = Max
4295 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4296 	 * RSSI: 100% = > -50,  0% = < -80
4297 	 * Rx errors: 100% = 0, 0% = 50% missed
4298 	 *
4299 	 * The lowest computed quality is used.
4300 	 *
4301 	 */
4302 #define BEACON_THRESHOLD 5
4303 	beacon_quality = 100 - missed_beacons_percent;
4304 	if (beacon_quality < BEACON_THRESHOLD)
4305 		beacon_quality = 0;
4306 	else
4307 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4308 		    (100 - BEACON_THRESHOLD);
4309 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4310 			beacon_quality, missed_beacons_percent);
4311 
4312 	priv->last_rate = ipw_get_current_rate(priv);
4313 	max_rate = ipw_get_max_rate(priv);
4314 	rate_quality = priv->last_rate * 40 / max_rate + 60;
4315 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4316 			rate_quality, priv->last_rate / 1000000);
4317 
4318 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4319 		rx_quality = 100 - (rx_err_delta * 100) /
4320 		    (rx_packets_delta + rx_err_delta);
4321 	else
4322 		rx_quality = 100;
4323 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4324 			rx_quality, rx_err_delta, rx_packets_delta);
4325 
4326 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4327 		tx_quality = 100 - (tx_failures_delta * 100) /
4328 		    (tx_packets_delta + tx_failures_delta);
4329 	else
4330 		tx_quality = 100;
4331 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4332 			tx_quality, tx_failures_delta, tx_packets_delta);
4333 
4334 	rssi = priv->exp_avg_rssi;
4335 	signal_quality =
4336 	    (100 *
4337 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4338 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4339 	     (priv->ieee->perfect_rssi - rssi) *
4340 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4341 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4342 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4343 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4344 	if (signal_quality > 100)
4345 		signal_quality = 100;
4346 	else if (signal_quality < 1)
4347 		signal_quality = 0;
4348 
4349 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4350 			signal_quality, rssi);
4351 
4352 	quality = min(rx_quality, signal_quality);
4353 	quality = min(tx_quality, quality);
4354 	quality = min(rate_quality, quality);
4355 	quality = min(beacon_quality, quality);
4356 	if (quality == beacon_quality)
4357 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4358 				quality);
4359 	if (quality == rate_quality)
4360 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4361 				quality);
4362 	if (quality == tx_quality)
4363 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4364 				quality);
4365 	if (quality == rx_quality)
4366 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4367 				quality);
4368 	if (quality == signal_quality)
4369 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4370 				quality);
4371 
4372 	priv->quality = quality;
4373 
4374 	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4375 }
4376 
4377 static void ipw_bg_gather_stats(struct work_struct *work)
4378 {
4379 	struct ipw_priv *priv =
4380 		container_of(work, struct ipw_priv, gather_stats.work);
4381 	mutex_lock(&priv->mutex);
4382 	ipw_gather_stats(priv);
4383 	mutex_unlock(&priv->mutex);
4384 }
4385 
4386 /* Missed beacon behavior:
4387  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4388  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4389  * Above disassociate threshold, give up and stop scanning.
4390  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4391 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4392 					    int missed_count)
4393 {
4394 	priv->notif_missed_beacons = missed_count;
4395 
4396 	if (missed_count > priv->disassociate_threshold &&
4397 	    priv->status & STATUS_ASSOCIATED) {
4398 		/* If associated and we've hit the missed
4399 		 * beacon threshold, disassociate, turn
4400 		 * off roaming, and abort any active scans */
4401 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4402 			  IPW_DL_STATE | IPW_DL_ASSOC,
4403 			  "Missed beacon: %d - disassociate\n", missed_count);
4404 		priv->status &= ~STATUS_ROAMING;
4405 		if (priv->status & STATUS_SCANNING) {
4406 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4407 				  IPW_DL_STATE,
4408 				  "Aborting scan with missed beacon.\n");
4409 			schedule_work(&priv->abort_scan);
4410 		}
4411 
4412 		schedule_work(&priv->disassociate);
4413 		return;
4414 	}
4415 
4416 	if (priv->status & STATUS_ROAMING) {
4417 		/* If we are currently roaming, then just
4418 		 * print a debug statement... */
4419 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4420 			  "Missed beacon: %d - roam in progress\n",
4421 			  missed_count);
4422 		return;
4423 	}
4424 
4425 	if (roaming &&
4426 	    (missed_count > priv->roaming_threshold &&
4427 	     missed_count <= priv->disassociate_threshold)) {
4428 		/* If we are not already roaming, set the ROAM
4429 		 * bit in the status and kick off a scan.
4430 		 * This can happen several times before we reach
4431 		 * disassociate_threshold. */
4432 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4433 			  "Missed beacon: %d - initiate "
4434 			  "roaming\n", missed_count);
4435 		if (!(priv->status & STATUS_ROAMING)) {
4436 			priv->status |= STATUS_ROAMING;
4437 			if (!(priv->status & STATUS_SCANNING))
4438 				schedule_delayed_work(&priv->request_scan, 0);
4439 		}
4440 		return;
4441 	}
4442 
4443 	if (priv->status & STATUS_SCANNING &&
4444 	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4445 		/* Stop scan to keep fw from getting
4446 		 * stuck (only if we aren't roaming --
4447 		 * otherwise we'll never scan more than 2 or 3
4448 		 * channels..) */
4449 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4450 			  "Aborting scan with missed beacon.\n");
4451 		schedule_work(&priv->abort_scan);
4452 	}
4453 
4454 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4455 }
4456 
4457 static void ipw_scan_event(struct work_struct *work)
4458 {
4459 	union iwreq_data wrqu;
4460 
4461 	struct ipw_priv *priv =
4462 		container_of(work, struct ipw_priv, scan_event.work);
4463 
4464 	wrqu.data.length = 0;
4465 	wrqu.data.flags = 0;
4466 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4467 }
4468 
4469 static void handle_scan_event(struct ipw_priv *priv)
4470 {
4471 	/* Only userspace-requested scan completion events go out immediately */
4472 	if (!priv->user_requested_scan) {
4473 		schedule_delayed_work(&priv->scan_event,
4474 				      round_jiffies_relative(msecs_to_jiffies(4000)));
4475 	} else {
4476 		priv->user_requested_scan = 0;
4477 		mod_delayed_work(system_wq, &priv->scan_event, 0);
4478 	}
4479 }
4480 
4481 /**
4482  * Handle host notification packet.
4483  * Called from interrupt routine
4484  */
4485 static void ipw_rx_notification(struct ipw_priv *priv,
4486 				       struct ipw_rx_notification *notif)
4487 {
4488 	u16 size = le16_to_cpu(notif->size);
4489 
4490 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4491 
4492 	switch (notif->subtype) {
4493 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4494 			struct notif_association *assoc = &notif->u.assoc;
4495 
4496 			switch (assoc->state) {
4497 			case CMAS_ASSOCIATED:{
4498 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4499 						  IPW_DL_ASSOC,
4500 						  "associated: '%*pE' %pM\n",
4501 						  priv->essid_len, priv->essid,
4502 						  priv->bssid);
4503 
4504 					switch (priv->ieee->iw_mode) {
4505 					case IW_MODE_INFRA:
4506 						memcpy(priv->ieee->bssid,
4507 						       priv->bssid, ETH_ALEN);
4508 						break;
4509 
4510 					case IW_MODE_ADHOC:
4511 						memcpy(priv->ieee->bssid,
4512 						       priv->bssid, ETH_ALEN);
4513 
4514 						/* clear out the station table */
4515 						priv->num_stations = 0;
4516 
4517 						IPW_DEBUG_ASSOC
4518 						    ("queueing adhoc check\n");
4519 						schedule_delayed_work(
4520 							&priv->adhoc_check,
4521 							le16_to_cpu(priv->
4522 							assoc_request.
4523 							beacon_interval));
4524 						break;
4525 					}
4526 
4527 					priv->status &= ~STATUS_ASSOCIATING;
4528 					priv->status |= STATUS_ASSOCIATED;
4529 					schedule_work(&priv->system_config);
4530 
4531 #ifdef CONFIG_IPW2200_QOS
4532 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4533 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4534 					if ((priv->status & STATUS_AUTH) &&
4535 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4536 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4537 						if ((sizeof
4538 						     (struct
4539 						      libipw_assoc_response)
4540 						     <= size)
4541 						    && (size <= 2314)) {
4542 							struct
4543 							libipw_rx_stats
4544 							    stats = {
4545 								.len = size - 1,
4546 							};
4547 
4548 							IPW_DEBUG_QOS
4549 							    ("QoS Associate "
4550 							     "size %d\n", size);
4551 							libipw_rx_mgt(priv->
4552 									 ieee,
4553 									 (struct
4554 									  libipw_hdr_4addr
4555 									  *)
4556 									 &notif->u.raw, &stats);
4557 						}
4558 					}
4559 #endif
4560 
4561 					schedule_work(&priv->link_up);
4562 
4563 					break;
4564 				}
4565 
4566 			case CMAS_AUTHENTICATED:{
4567 					if (priv->
4568 					    status & (STATUS_ASSOCIATED |
4569 						      STATUS_AUTH)) {
4570 						struct notif_authenticate *auth
4571 						    = &notif->u.auth;
4572 						IPW_DEBUG(IPW_DL_NOTIF |
4573 							  IPW_DL_STATE |
4574 							  IPW_DL_ASSOC,
4575 							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4576 							  priv->essid_len,
4577 							  priv->essid,
4578 							  priv->bssid,
4579 							  le16_to_cpu(auth->status),
4580 							  ipw_get_status_code
4581 							  (le16_to_cpu
4582 							   (auth->status)));
4583 
4584 						priv->status &=
4585 						    ~(STATUS_ASSOCIATING |
4586 						      STATUS_AUTH |
4587 						      STATUS_ASSOCIATED);
4588 
4589 						schedule_work(&priv->link_down);
4590 						break;
4591 					}
4592 
4593 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4594 						  IPW_DL_ASSOC,
4595 						  "authenticated: '%*pE' %pM\n",
4596 						  priv->essid_len, priv->essid,
4597 						  priv->bssid);
4598 					break;
4599 				}
4600 
4601 			case CMAS_INIT:{
4602 					if (priv->status & STATUS_AUTH) {
4603 						struct
4604 						    libipw_assoc_response
4605 						*resp;
4606 						resp =
4607 						    (struct
4608 						     libipw_assoc_response
4609 						     *)&notif->u.raw;
4610 						IPW_DEBUG(IPW_DL_NOTIF |
4611 							  IPW_DL_STATE |
4612 							  IPW_DL_ASSOC,
4613 							  "association failed (0x%04X): %s\n",
4614 							  le16_to_cpu(resp->status),
4615 							  ipw_get_status_code
4616 							  (le16_to_cpu
4617 							   (resp->status)));
4618 					}
4619 
4620 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4621 						  IPW_DL_ASSOC,
4622 						  "disassociated: '%*pE' %pM\n",
4623 						  priv->essid_len, priv->essid,
4624 						  priv->bssid);
4625 
4626 					priv->status &=
4627 					    ~(STATUS_DISASSOCIATING |
4628 					      STATUS_ASSOCIATING |
4629 					      STATUS_ASSOCIATED | STATUS_AUTH);
4630 					if (priv->assoc_network
4631 					    && (priv->assoc_network->
4632 						capability &
4633 						WLAN_CAPABILITY_IBSS))
4634 						ipw_remove_current_network
4635 						    (priv);
4636 
4637 					schedule_work(&priv->link_down);
4638 
4639 					break;
4640 				}
4641 
4642 			case CMAS_RX_ASSOC_RESP:
4643 				break;
4644 
4645 			default:
4646 				IPW_ERROR("assoc: unknown (%d)\n",
4647 					  assoc->state);
4648 				break;
4649 			}
4650 
4651 			break;
4652 		}
4653 
4654 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4655 			struct notif_authenticate *auth = &notif->u.auth;
4656 			switch (auth->state) {
4657 			case CMAS_AUTHENTICATED:
4658 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4659 					  "authenticated: '%*pE' %pM\n",
4660 					  priv->essid_len, priv->essid,
4661 					  priv->bssid);
4662 				priv->status |= STATUS_AUTH;
4663 				break;
4664 
4665 			case CMAS_INIT:
4666 				if (priv->status & STATUS_AUTH) {
4667 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4668 						  IPW_DL_ASSOC,
4669 						  "authentication failed (0x%04X): %s\n",
4670 						  le16_to_cpu(auth->status),
4671 						  ipw_get_status_code(le16_to_cpu
4672 								      (auth->
4673 								       status)));
4674 				}
4675 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4676 					  IPW_DL_ASSOC,
4677 					  "deauthenticated: '%*pE' %pM\n",
4678 					  priv->essid_len, priv->essid,
4679 					  priv->bssid);
4680 
4681 				priv->status &= ~(STATUS_ASSOCIATING |
4682 						  STATUS_AUTH |
4683 						  STATUS_ASSOCIATED);
4684 
4685 				schedule_work(&priv->link_down);
4686 				break;
4687 
4688 			case CMAS_TX_AUTH_SEQ_1:
4689 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4690 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4691 				break;
4692 			case CMAS_RX_AUTH_SEQ_2:
4693 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4694 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4695 				break;
4696 			case CMAS_AUTH_SEQ_1_PASS:
4697 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4698 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4699 				break;
4700 			case CMAS_AUTH_SEQ_1_FAIL:
4701 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4702 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4703 				break;
4704 			case CMAS_TX_AUTH_SEQ_3:
4705 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4706 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4707 				break;
4708 			case CMAS_RX_AUTH_SEQ_4:
4709 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4710 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4711 				break;
4712 			case CMAS_AUTH_SEQ_2_PASS:
4713 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4714 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4715 				break;
4716 			case CMAS_AUTH_SEQ_2_FAIL:
4717 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4718 					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4719 				break;
4720 			case CMAS_TX_ASSOC:
4721 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4722 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4723 				break;
4724 			case CMAS_RX_ASSOC_RESP:
4725 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4726 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4727 
4728 				break;
4729 			case CMAS_ASSOCIATED:
4730 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4731 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4732 				break;
4733 			default:
4734 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4735 						auth->state);
4736 				break;
4737 			}
4738 			break;
4739 		}
4740 
4741 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4742 			struct notif_channel_result *x =
4743 			    &notif->u.channel_result;
4744 
4745 			if (size == sizeof(*x)) {
4746 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4747 					       x->channel_num);
4748 			} else {
4749 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4750 					       "(should be %zd)\n",
4751 					       size, sizeof(*x));
4752 			}
4753 			break;
4754 		}
4755 
4756 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4757 			struct notif_scan_complete *x = &notif->u.scan_complete;
4758 			if (size == sizeof(*x)) {
4759 				IPW_DEBUG_SCAN
4760 				    ("Scan completed: type %d, %d channels, "
4761 				     "%d status\n", x->scan_type,
4762 				     x->num_channels, x->status);
4763 			} else {
4764 				IPW_ERROR("Scan completed of wrong size %d "
4765 					  "(should be %zd)\n",
4766 					  size, sizeof(*x));
4767 			}
4768 
4769 			priv->status &=
4770 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4771 
4772 			wake_up_interruptible(&priv->wait_state);
4773 			cancel_delayed_work(&priv->scan_check);
4774 
4775 			if (priv->status & STATUS_EXIT_PENDING)
4776 				break;
4777 
4778 			priv->ieee->scans++;
4779 
4780 #ifdef CONFIG_IPW2200_MONITOR
4781 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4782 				priv->status |= STATUS_SCAN_FORCED;
4783 				schedule_delayed_work(&priv->request_scan, 0);
4784 				break;
4785 			}
4786 			priv->status &= ~STATUS_SCAN_FORCED;
4787 #endif				/* CONFIG_IPW2200_MONITOR */
4788 
4789 			/* Do queued direct scans first */
4790 			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4791 				schedule_delayed_work(&priv->request_direct_scan, 0);
4792 
4793 			if (!(priv->status & (STATUS_ASSOCIATED |
4794 					      STATUS_ASSOCIATING |
4795 					      STATUS_ROAMING |
4796 					      STATUS_DISASSOCIATING)))
4797 				schedule_work(&priv->associate);
4798 			else if (priv->status & STATUS_ROAMING) {
4799 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4800 					/* If a scan completed and we are in roam mode, then
4801 					 * the scan that completed was the one requested as a
4802 					 * result of entering roam... so, schedule the
4803 					 * roam work */
4804 					schedule_work(&priv->roam);
4805 				else
4806 					/* Don't schedule if we aborted the scan */
4807 					priv->status &= ~STATUS_ROAMING;
4808 			} else if (priv->status & STATUS_SCAN_PENDING)
4809 				schedule_delayed_work(&priv->request_scan, 0);
4810 			else if (priv->config & CFG_BACKGROUND_SCAN
4811 				 && priv->status & STATUS_ASSOCIATED)
4812 				schedule_delayed_work(&priv->request_scan,
4813 						      round_jiffies_relative(HZ));
4814 
4815 			/* Send an empty event to user space.
4816 			 * We don't send the received data on the event because
4817 			 * it would require us to do complex transcoding, and
4818 			 * we want to minimise the work done in the irq handler
4819 			 * Use a request to extract the data.
4820 			 * Also, we generate this even for any scan, regardless
4821 			 * on how the scan was initiated. User space can just
4822 			 * sync on periodic scan to get fresh data...
4823 			 * Jean II */
4824 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4825 				handle_scan_event(priv);
4826 			break;
4827 		}
4828 
4829 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4830 			struct notif_frag_length *x = &notif->u.frag_len;
4831 
4832 			if (size == sizeof(*x))
4833 				IPW_ERROR("Frag length: %d\n",
4834 					  le16_to_cpu(x->frag_length));
4835 			else
4836 				IPW_ERROR("Frag length of wrong size %d "
4837 					  "(should be %zd)\n",
4838 					  size, sizeof(*x));
4839 			break;
4840 		}
4841 
4842 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4843 			struct notif_link_deterioration *x =
4844 			    &notif->u.link_deterioration;
4845 
4846 			if (size == sizeof(*x)) {
4847 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4848 					"link deterioration: type %d, cnt %d\n",
4849 					x->silence_notification_type,
4850 					x->silence_count);
4851 				memcpy(&priv->last_link_deterioration, x,
4852 				       sizeof(*x));
4853 			} else {
4854 				IPW_ERROR("Link Deterioration of wrong size %d "
4855 					  "(should be %zd)\n",
4856 					  size, sizeof(*x));
4857 			}
4858 			break;
4859 		}
4860 
4861 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4862 			IPW_ERROR("Dino config\n");
4863 			if (priv->hcmd
4864 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4865 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4866 
4867 			break;
4868 		}
4869 
4870 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4871 			struct notif_beacon_state *x = &notif->u.beacon_state;
4872 			if (size != sizeof(*x)) {
4873 				IPW_ERROR
4874 				    ("Beacon state of wrong size %d (should "
4875 				     "be %zd)\n", size, sizeof(*x));
4876 				break;
4877 			}
4878 
4879 			if (le32_to_cpu(x->state) ==
4880 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4881 				ipw_handle_missed_beacon(priv,
4882 							 le32_to_cpu(x->
4883 								     number));
4884 
4885 			break;
4886 		}
4887 
4888 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4889 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4890 			if (size == sizeof(*x)) {
4891 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4892 					  "0x%02x station %d\n",
4893 					  x->key_state, x->security_type,
4894 					  x->station_index);
4895 				break;
4896 			}
4897 
4898 			IPW_ERROR
4899 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4900 			     size, sizeof(*x));
4901 			break;
4902 		}
4903 
4904 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4905 			struct notif_calibration *x = &notif->u.calibration;
4906 
4907 			if (size == sizeof(*x)) {
4908 				memcpy(&priv->calib, x, sizeof(*x));
4909 				IPW_DEBUG_INFO("TODO: Calibration\n");
4910 				break;
4911 			}
4912 
4913 			IPW_ERROR
4914 			    ("Calibration of wrong size %d (should be %zd)\n",
4915 			     size, sizeof(*x));
4916 			break;
4917 		}
4918 
4919 	case HOST_NOTIFICATION_NOISE_STATS:{
4920 			if (size == sizeof(u32)) {
4921 				priv->exp_avg_noise =
4922 				    exponential_average(priv->exp_avg_noise,
4923 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4924 				    DEPTH_NOISE);
4925 				break;
4926 			}
4927 
4928 			IPW_ERROR
4929 			    ("Noise stat is wrong size %d (should be %zd)\n",
4930 			     size, sizeof(u32));
4931 			break;
4932 		}
4933 
4934 	default:
4935 		IPW_DEBUG_NOTIF("Unknown notification: "
4936 				"subtype=%d,flags=0x%2x,size=%d\n",
4937 				notif->subtype, notif->flags, size);
4938 	}
4939 }
4940 
4941 /**
4942  * Destroys all DMA structures and initialise them again
4943  *
4944  * @param priv
4945  * @return error code
4946  */
4947 static int ipw_queue_reset(struct ipw_priv *priv)
4948 {
4949 	int rc = 0;
4950 	/** @todo customize queue sizes */
4951 	int nTx = 64, nTxCmd = 8;
4952 	ipw_tx_queue_free(priv);
4953 	/* Tx CMD queue */
4954 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4955 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4956 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4957 			       IPW_TX_CMD_QUEUE_BD_BASE,
4958 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4959 	if (rc) {
4960 		IPW_ERROR("Tx Cmd queue init failed\n");
4961 		goto error;
4962 	}
4963 	/* Tx queue(s) */
4964 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4965 			       IPW_TX_QUEUE_0_READ_INDEX,
4966 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4967 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4968 	if (rc) {
4969 		IPW_ERROR("Tx 0 queue init failed\n");
4970 		goto error;
4971 	}
4972 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4973 			       IPW_TX_QUEUE_1_READ_INDEX,
4974 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4975 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4976 	if (rc) {
4977 		IPW_ERROR("Tx 1 queue init failed\n");
4978 		goto error;
4979 	}
4980 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4981 			       IPW_TX_QUEUE_2_READ_INDEX,
4982 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4983 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4984 	if (rc) {
4985 		IPW_ERROR("Tx 2 queue init failed\n");
4986 		goto error;
4987 	}
4988 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4989 			       IPW_TX_QUEUE_3_READ_INDEX,
4990 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4991 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4992 	if (rc) {
4993 		IPW_ERROR("Tx 3 queue init failed\n");
4994 		goto error;
4995 	}
4996 	/* statistics */
4997 	priv->rx_bufs_min = 0;
4998 	priv->rx_pend_max = 0;
4999 	return rc;
5000 
5001       error:
5002 	ipw_tx_queue_free(priv);
5003 	return rc;
5004 }
5005 
5006 /**
5007  * Reclaim Tx queue entries no more used by NIC.
5008  *
5009  * When FW advances 'R' index, all entries between old and
5010  * new 'R' index need to be reclaimed. As result, some free space
5011  * forms. If there is enough free space (> low mark), wake Tx queue.
5012  *
5013  * @note Need to protect against garbage in 'R' index
5014  * @param priv
5015  * @param txq
5016  * @param qindex
5017  * @return Number of used entries remains in the queue
5018  */
5019 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5020 				struct clx2_tx_queue *txq, int qindex)
5021 {
5022 	u32 hw_tail;
5023 	int used;
5024 	struct clx2_queue *q = &txq->q;
5025 
5026 	hw_tail = ipw_read32(priv, q->reg_r);
5027 	if (hw_tail >= q->n_bd) {
5028 		IPW_ERROR
5029 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5030 		     hw_tail, q->n_bd);
5031 		goto done;
5032 	}
5033 	for (; q->last_used != hw_tail;
5034 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5035 		ipw_queue_tx_free_tfd(priv, txq);
5036 		priv->tx_packets++;
5037 	}
5038       done:
5039 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5040 	    (qindex >= 0))
5041 		netif_wake_queue(priv->net_dev);
5042 	used = q->first_empty - q->last_used;
5043 	if (used < 0)
5044 		used += q->n_bd;
5045 
5046 	return used;
5047 }
5048 
5049 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5050 			     int len, int sync)
5051 {
5052 	struct clx2_tx_queue *txq = &priv->txq_cmd;
5053 	struct clx2_queue *q = &txq->q;
5054 	struct tfd_frame *tfd;
5055 
5056 	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5057 		IPW_ERROR("No space for Tx\n");
5058 		return -EBUSY;
5059 	}
5060 
5061 	tfd = &txq->bd[q->first_empty];
5062 	txq->txb[q->first_empty] = NULL;
5063 
5064 	memset(tfd, 0, sizeof(*tfd));
5065 	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5066 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5067 	priv->hcmd_seq++;
5068 	tfd->u.cmd.index = hcmd;
5069 	tfd->u.cmd.length = len;
5070 	memcpy(tfd->u.cmd.payload, buf, len);
5071 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5072 	ipw_write32(priv, q->reg_w, q->first_empty);
5073 	_ipw_read32(priv, 0x90);
5074 
5075 	return 0;
5076 }
5077 
5078 /*
5079  * Rx theory of operation
5080  *
5081  * The host allocates 32 DMA target addresses and passes the host address
5082  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5083  * 0 to 31
5084  *
5085  * Rx Queue Indexes
5086  * The host/firmware share two index registers for managing the Rx buffers.
5087  *
5088  * The READ index maps to the first position that the firmware may be writing
5089  * to -- the driver can read up to (but not including) this position and get
5090  * good data.
5091  * The READ index is managed by the firmware once the card is enabled.
5092  *
5093  * The WRITE index maps to the last position the driver has read from -- the
5094  * position preceding WRITE is the last slot the firmware can place a packet.
5095  *
5096  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5097  * WRITE = READ.
5098  *
5099  * During initialization the host sets up the READ queue position to the first
5100  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5101  *
5102  * When the firmware places a packet in a buffer it will advance the READ index
5103  * and fire the RX interrupt.  The driver can then query the READ index and
5104  * process as many packets as possible, moving the WRITE index forward as it
5105  * resets the Rx queue buffers with new memory.
5106  *
5107  * The management in the driver is as follows:
5108  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5109  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5110  *   to replensish the ipw->rxq->rx_free.
5111  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5112  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5113  *   'processed' and 'read' driver indexes as well)
5114  * + A received packet is processed and handed to the kernel network stack,
5115  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5116  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5117  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5118  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5119  *   were enough free buffers and RX_STALLED is set it is cleared.
5120  *
5121  *
5122  * Driver sequence:
5123  *
5124  * ipw_rx_queue_alloc()       Allocates rx_free
5125  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5126  *                            ipw_rx_queue_restock
5127  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5128  *                            queue, updates firmware pointers, and updates
5129  *                            the WRITE index.  If insufficient rx_free buffers
5130  *                            are available, schedules ipw_rx_queue_replenish
5131  *
5132  * -- enable interrupts --
5133  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5134  *                            READ INDEX, detaching the SKB from the pool.
5135  *                            Moves the packet buffer from queue to rx_used.
5136  *                            Calls ipw_rx_queue_restock to refill any empty
5137  *                            slots.
5138  * ...
5139  *
5140  */
5141 
5142 /*
5143  * If there are slots in the RX queue that  need to be restocked,
5144  * and we have free pre-allocated buffers, fill the ranks as much
5145  * as we can pulling from rx_free.
5146  *
5147  * This moves the 'write' index forward to catch up with 'processed', and
5148  * also updates the memory address in the firmware to reference the new
5149  * target buffer.
5150  */
5151 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5152 {
5153 	struct ipw_rx_queue *rxq = priv->rxq;
5154 	struct list_head *element;
5155 	struct ipw_rx_mem_buffer *rxb;
5156 	unsigned long flags;
5157 	int write;
5158 
5159 	spin_lock_irqsave(&rxq->lock, flags);
5160 	write = rxq->write;
5161 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5162 		element = rxq->rx_free.next;
5163 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5164 		list_del(element);
5165 
5166 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5167 			    rxb->dma_addr);
5168 		rxq->queue[rxq->write] = rxb;
5169 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5170 		rxq->free_count--;
5171 	}
5172 	spin_unlock_irqrestore(&rxq->lock, flags);
5173 
5174 	/* If the pre-allocated buffer pool is dropping low, schedule to
5175 	 * refill it */
5176 	if (rxq->free_count <= RX_LOW_WATERMARK)
5177 		schedule_work(&priv->rx_replenish);
5178 
5179 	/* If we've added more space for the firmware to place data, tell it */
5180 	if (write != rxq->write)
5181 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5182 }
5183 
5184 /*
5185  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5186  * Also restock the Rx queue via ipw_rx_queue_restock.
5187  *
5188  * This is called as a scheduled work item (except for during initialization)
5189  */
5190 static void ipw_rx_queue_replenish(void *data)
5191 {
5192 	struct ipw_priv *priv = data;
5193 	struct ipw_rx_queue *rxq = priv->rxq;
5194 	struct list_head *element;
5195 	struct ipw_rx_mem_buffer *rxb;
5196 	unsigned long flags;
5197 
5198 	spin_lock_irqsave(&rxq->lock, flags);
5199 	while (!list_empty(&rxq->rx_used)) {
5200 		element = rxq->rx_used.next;
5201 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5202 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5203 		if (!rxb->skb) {
5204 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5205 			       priv->net_dev->name);
5206 			/* We don't reschedule replenish work here -- we will
5207 			 * call the restock method and if it still needs
5208 			 * more buffers it will schedule replenish */
5209 			break;
5210 		}
5211 		list_del(element);
5212 
5213 		rxb->dma_addr =
5214 		    pci_map_single(priv->pci_dev, rxb->skb->data,
5215 				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5216 
5217 		list_add_tail(&rxb->list, &rxq->rx_free);
5218 		rxq->free_count++;
5219 	}
5220 	spin_unlock_irqrestore(&rxq->lock, flags);
5221 
5222 	ipw_rx_queue_restock(priv);
5223 }
5224 
5225 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5226 {
5227 	struct ipw_priv *priv =
5228 		container_of(work, struct ipw_priv, rx_replenish);
5229 	mutex_lock(&priv->mutex);
5230 	ipw_rx_queue_replenish(priv);
5231 	mutex_unlock(&priv->mutex);
5232 }
5233 
5234 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5235  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5236  * This free routine walks the list of POOL entries and if SKB is set to
5237  * non NULL it is unmapped and freed
5238  */
5239 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5240 {
5241 	int i;
5242 
5243 	if (!rxq)
5244 		return;
5245 
5246 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5247 		if (rxq->pool[i].skb != NULL) {
5248 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5249 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5250 			dev_kfree_skb(rxq->pool[i].skb);
5251 		}
5252 	}
5253 
5254 	kfree(rxq);
5255 }
5256 
5257 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5258 {
5259 	struct ipw_rx_queue *rxq;
5260 	int i;
5261 
5262 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5263 	if (unlikely(!rxq)) {
5264 		IPW_ERROR("memory allocation failed\n");
5265 		return NULL;
5266 	}
5267 	spin_lock_init(&rxq->lock);
5268 	INIT_LIST_HEAD(&rxq->rx_free);
5269 	INIT_LIST_HEAD(&rxq->rx_used);
5270 
5271 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5272 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5273 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5274 
5275 	/* Set us so that we have processed and used all buffers, but have
5276 	 * not restocked the Rx queue with fresh buffers */
5277 	rxq->read = rxq->write = 0;
5278 	rxq->free_count = 0;
5279 
5280 	return rxq;
5281 }
5282 
5283 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5284 {
5285 	rate &= ~LIBIPW_BASIC_RATE_MASK;
5286 	if (ieee_mode == IEEE_A) {
5287 		switch (rate) {
5288 		case LIBIPW_OFDM_RATE_6MB:
5289 			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5290 			    1 : 0;
5291 		case LIBIPW_OFDM_RATE_9MB:
5292 			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5293 			    1 : 0;
5294 		case LIBIPW_OFDM_RATE_12MB:
5295 			return priv->
5296 			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5297 		case LIBIPW_OFDM_RATE_18MB:
5298 			return priv->
5299 			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5300 		case LIBIPW_OFDM_RATE_24MB:
5301 			return priv->
5302 			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5303 		case LIBIPW_OFDM_RATE_36MB:
5304 			return priv->
5305 			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5306 		case LIBIPW_OFDM_RATE_48MB:
5307 			return priv->
5308 			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5309 		case LIBIPW_OFDM_RATE_54MB:
5310 			return priv->
5311 			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5312 		default:
5313 			return 0;
5314 		}
5315 	}
5316 
5317 	/* B and G mixed */
5318 	switch (rate) {
5319 	case LIBIPW_CCK_RATE_1MB:
5320 		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5321 	case LIBIPW_CCK_RATE_2MB:
5322 		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5323 	case LIBIPW_CCK_RATE_5MB:
5324 		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5325 	case LIBIPW_CCK_RATE_11MB:
5326 		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5327 	}
5328 
5329 	/* If we are limited to B modulations, bail at this point */
5330 	if (ieee_mode == IEEE_B)
5331 		return 0;
5332 
5333 	/* G */
5334 	switch (rate) {
5335 	case LIBIPW_OFDM_RATE_6MB:
5336 		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5337 	case LIBIPW_OFDM_RATE_9MB:
5338 		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5339 	case LIBIPW_OFDM_RATE_12MB:
5340 		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5341 	case LIBIPW_OFDM_RATE_18MB:
5342 		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5343 	case LIBIPW_OFDM_RATE_24MB:
5344 		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5345 	case LIBIPW_OFDM_RATE_36MB:
5346 		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5347 	case LIBIPW_OFDM_RATE_48MB:
5348 		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5349 	case LIBIPW_OFDM_RATE_54MB:
5350 		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5351 	}
5352 
5353 	return 0;
5354 }
5355 
5356 static int ipw_compatible_rates(struct ipw_priv *priv,
5357 				const struct libipw_network *network,
5358 				struct ipw_supported_rates *rates)
5359 {
5360 	int num_rates, i;
5361 
5362 	memset(rates, 0, sizeof(*rates));
5363 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5364 	rates->num_rates = 0;
5365 	for (i = 0; i < num_rates; i++) {
5366 		if (!ipw_is_rate_in_mask(priv, network->mode,
5367 					 network->rates[i])) {
5368 
5369 			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5370 				IPW_DEBUG_SCAN("Adding masked mandatory "
5371 					       "rate %02X\n",
5372 					       network->rates[i]);
5373 				rates->supported_rates[rates->num_rates++] =
5374 				    network->rates[i];
5375 				continue;
5376 			}
5377 
5378 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5379 				       network->rates[i], priv->rates_mask);
5380 			continue;
5381 		}
5382 
5383 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5384 	}
5385 
5386 	num_rates = min(network->rates_ex_len,
5387 			(u8) (IPW_MAX_RATES - num_rates));
5388 	for (i = 0; i < num_rates; i++) {
5389 		if (!ipw_is_rate_in_mask(priv, network->mode,
5390 					 network->rates_ex[i])) {
5391 			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5392 				IPW_DEBUG_SCAN("Adding masked mandatory "
5393 					       "rate %02X\n",
5394 					       network->rates_ex[i]);
5395 				rates->supported_rates[rates->num_rates++] =
5396 				    network->rates[i];
5397 				continue;
5398 			}
5399 
5400 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5401 				       network->rates_ex[i], priv->rates_mask);
5402 			continue;
5403 		}
5404 
5405 		rates->supported_rates[rates->num_rates++] =
5406 		    network->rates_ex[i];
5407 	}
5408 
5409 	return 1;
5410 }
5411 
5412 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5413 				  const struct ipw_supported_rates *src)
5414 {
5415 	u8 i;
5416 	for (i = 0; i < src->num_rates; i++)
5417 		dest->supported_rates[i] = src->supported_rates[i];
5418 	dest->num_rates = src->num_rates;
5419 }
5420 
5421 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5422  * mask should ever be used -- right now all callers to add the scan rates are
5423  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5424 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5425 				   u8 modulation, u32 rate_mask)
5426 {
5427 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5428 	    LIBIPW_BASIC_RATE_MASK : 0;
5429 
5430 	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5431 		rates->supported_rates[rates->num_rates++] =
5432 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5433 
5434 	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5435 		rates->supported_rates[rates->num_rates++] =
5436 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5437 
5438 	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5439 		rates->supported_rates[rates->num_rates++] = basic_mask |
5440 		    LIBIPW_CCK_RATE_5MB;
5441 
5442 	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5443 		rates->supported_rates[rates->num_rates++] = basic_mask |
5444 		    LIBIPW_CCK_RATE_11MB;
5445 }
5446 
5447 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5448 				    u8 modulation, u32 rate_mask)
5449 {
5450 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5451 	    LIBIPW_BASIC_RATE_MASK : 0;
5452 
5453 	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5454 		rates->supported_rates[rates->num_rates++] = basic_mask |
5455 		    LIBIPW_OFDM_RATE_6MB;
5456 
5457 	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5458 		rates->supported_rates[rates->num_rates++] =
5459 		    LIBIPW_OFDM_RATE_9MB;
5460 
5461 	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5462 		rates->supported_rates[rates->num_rates++] = basic_mask |
5463 		    LIBIPW_OFDM_RATE_12MB;
5464 
5465 	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5466 		rates->supported_rates[rates->num_rates++] =
5467 		    LIBIPW_OFDM_RATE_18MB;
5468 
5469 	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5470 		rates->supported_rates[rates->num_rates++] = basic_mask |
5471 		    LIBIPW_OFDM_RATE_24MB;
5472 
5473 	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5474 		rates->supported_rates[rates->num_rates++] =
5475 		    LIBIPW_OFDM_RATE_36MB;
5476 
5477 	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5478 		rates->supported_rates[rates->num_rates++] =
5479 		    LIBIPW_OFDM_RATE_48MB;
5480 
5481 	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5482 		rates->supported_rates[rates->num_rates++] =
5483 		    LIBIPW_OFDM_RATE_54MB;
5484 }
5485 
5486 struct ipw_network_match {
5487 	struct libipw_network *network;
5488 	struct ipw_supported_rates rates;
5489 };
5490 
5491 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5492 				  struct ipw_network_match *match,
5493 				  struct libipw_network *network,
5494 				  int roaming)
5495 {
5496 	struct ipw_supported_rates rates;
5497 
5498 	/* Verify that this network's capability is compatible with the
5499 	 * current mode (AdHoc or Infrastructure) */
5500 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5501 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5502 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5503 				network->ssid_len, network->ssid,
5504 				network->bssid);
5505 		return 0;
5506 	}
5507 
5508 	if (unlikely(roaming)) {
5509 		/* If we are roaming, then ensure check if this is a valid
5510 		 * network to try and roam to */
5511 		if ((network->ssid_len != match->network->ssid_len) ||
5512 		    memcmp(network->ssid, match->network->ssid,
5513 			   network->ssid_len)) {
5514 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5515 					network->ssid_len, network->ssid,
5516 					network->bssid);
5517 			return 0;
5518 		}
5519 	} else {
5520 		/* If an ESSID has been configured then compare the broadcast
5521 		 * ESSID to ours */
5522 		if ((priv->config & CFG_STATIC_ESSID) &&
5523 		    ((network->ssid_len != priv->essid_len) ||
5524 		     memcmp(network->ssid, priv->essid,
5525 			    min(network->ssid_len, priv->essid_len)))) {
5526 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5527 					network->ssid_len, network->ssid,
5528 					network->bssid, priv->essid_len,
5529 					priv->essid);
5530 			return 0;
5531 		}
5532 	}
5533 
5534 	/* If the old network rate is better than this one, don't bother
5535 	 * testing everything else. */
5536 
5537 	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5538 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5539 				match->network->ssid_len, match->network->ssid);
5540 		return 0;
5541 	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5542 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5543 				match->network->ssid_len, match->network->ssid);
5544 		return 0;
5545 	}
5546 
5547 	/* Now go through and see if the requested network is valid... */
5548 	if (priv->ieee->scan_age != 0 &&
5549 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5550 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5551 				network->ssid_len, network->ssid,
5552 				network->bssid,
5553 				jiffies_to_msecs(jiffies -
5554 						 network->last_scanned));
5555 		return 0;
5556 	}
5557 
5558 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5559 	    (network->channel != priv->channel)) {
5560 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5561 				network->ssid_len, network->ssid,
5562 				network->bssid,
5563 				network->channel, priv->channel);
5564 		return 0;
5565 	}
5566 
5567 	/* Verify privacy compatibility */
5568 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5569 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5570 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5571 				network->ssid_len, network->ssid,
5572 				network->bssid,
5573 				priv->
5574 				capability & CAP_PRIVACY_ON ? "on" : "off",
5575 				network->
5576 				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5577 				"off");
5578 		return 0;
5579 	}
5580 
5581 	if (ether_addr_equal(network->bssid, priv->bssid)) {
5582 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5583 				network->ssid_len, network->ssid,
5584 				network->bssid, priv->bssid);
5585 		return 0;
5586 	}
5587 
5588 	/* Filter out any incompatible freq / mode combinations */
5589 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5590 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5591 				network->ssid_len, network->ssid,
5592 				network->bssid);
5593 		return 0;
5594 	}
5595 
5596 	/* Ensure that the rates supported by the driver are compatible with
5597 	 * this AP, including verification of basic rates (mandatory) */
5598 	if (!ipw_compatible_rates(priv, network, &rates)) {
5599 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5600 				network->ssid_len, network->ssid,
5601 				network->bssid);
5602 		return 0;
5603 	}
5604 
5605 	if (rates.num_rates == 0) {
5606 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5607 				network->ssid_len, network->ssid,
5608 				network->bssid);
5609 		return 0;
5610 	}
5611 
5612 	/* TODO: Perform any further minimal comparititive tests.  We do not
5613 	 * want to put too much policy logic here; intelligent scan selection
5614 	 * should occur within a generic IEEE 802.11 user space tool.  */
5615 
5616 	/* Set up 'new' AP to this network */
5617 	ipw_copy_rates(&match->rates, &rates);
5618 	match->network = network;
5619 	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5620 			network->ssid_len, network->ssid, network->bssid);
5621 
5622 	return 1;
5623 }
5624 
5625 static void ipw_merge_adhoc_network(struct work_struct *work)
5626 {
5627 	struct ipw_priv *priv =
5628 		container_of(work, struct ipw_priv, merge_networks);
5629 	struct libipw_network *network = NULL;
5630 	struct ipw_network_match match = {
5631 		.network = priv->assoc_network
5632 	};
5633 
5634 	if ((priv->status & STATUS_ASSOCIATED) &&
5635 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5636 		/* First pass through ROAM process -- look for a better
5637 		 * network */
5638 		unsigned long flags;
5639 
5640 		spin_lock_irqsave(&priv->ieee->lock, flags);
5641 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5642 			if (network != priv->assoc_network)
5643 				ipw_find_adhoc_network(priv, &match, network,
5644 						       1);
5645 		}
5646 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5647 
5648 		if (match.network == priv->assoc_network) {
5649 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5650 					"merge to.\n");
5651 			return;
5652 		}
5653 
5654 		mutex_lock(&priv->mutex);
5655 		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5656 			IPW_DEBUG_MERGE("remove network %*pE\n",
5657 					priv->essid_len, priv->essid);
5658 			ipw_remove_current_network(priv);
5659 		}
5660 
5661 		ipw_disassociate(priv);
5662 		priv->assoc_network = match.network;
5663 		mutex_unlock(&priv->mutex);
5664 		return;
5665 	}
5666 }
5667 
5668 static int ipw_best_network(struct ipw_priv *priv,
5669 			    struct ipw_network_match *match,
5670 			    struct libipw_network *network, int roaming)
5671 {
5672 	struct ipw_supported_rates rates;
5673 
5674 	/* Verify that this network's capability is compatible with the
5675 	 * current mode (AdHoc or Infrastructure) */
5676 	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5677 	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5678 	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5679 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5680 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5681 				network->ssid_len, network->ssid,
5682 				network->bssid);
5683 		return 0;
5684 	}
5685 
5686 	if (unlikely(roaming)) {
5687 		/* If we are roaming, then ensure check if this is a valid
5688 		 * network to try and roam to */
5689 		if ((network->ssid_len != match->network->ssid_len) ||
5690 		    memcmp(network->ssid, match->network->ssid,
5691 			   network->ssid_len)) {
5692 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5693 					network->ssid_len, network->ssid,
5694 					network->bssid);
5695 			return 0;
5696 		}
5697 	} else {
5698 		/* If an ESSID has been configured then compare the broadcast
5699 		 * ESSID to ours */
5700 		if ((priv->config & CFG_STATIC_ESSID) &&
5701 		    ((network->ssid_len != priv->essid_len) ||
5702 		     memcmp(network->ssid, priv->essid,
5703 			    min(network->ssid_len, priv->essid_len)))) {
5704 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5705 					network->ssid_len, network->ssid,
5706 					network->bssid, priv->essid_len,
5707 					priv->essid);
5708 			return 0;
5709 		}
5710 	}
5711 
5712 	/* If the old network rate is better than this one, don't bother
5713 	 * testing everything else. */
5714 	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5715 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5716 				network->ssid_len, network->ssid,
5717 				network->bssid, match->network->ssid_len,
5718 				match->network->ssid, match->network->bssid);
5719 		return 0;
5720 	}
5721 
5722 	/* If this network has already had an association attempt within the
5723 	 * last 3 seconds, do not try and associate again... */
5724 	if (network->last_associate &&
5725 	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5726 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5727 				network->ssid_len, network->ssid,
5728 				network->bssid,
5729 				jiffies_to_msecs(jiffies -
5730 						 network->last_associate));
5731 		return 0;
5732 	}
5733 
5734 	/* Now go through and see if the requested network is valid... */
5735 	if (priv->ieee->scan_age != 0 &&
5736 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5737 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5738 				network->ssid_len, network->ssid,
5739 				network->bssid,
5740 				jiffies_to_msecs(jiffies -
5741 						 network->last_scanned));
5742 		return 0;
5743 	}
5744 
5745 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5746 	    (network->channel != priv->channel)) {
5747 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5748 				network->ssid_len, network->ssid,
5749 				network->bssid,
5750 				network->channel, priv->channel);
5751 		return 0;
5752 	}
5753 
5754 	/* Verify privacy compatibility */
5755 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5756 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5757 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5758 				network->ssid_len, network->ssid,
5759 				network->bssid,
5760 				priv->capability & CAP_PRIVACY_ON ? "on" :
5761 				"off",
5762 				network->capability &
5763 				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5764 		return 0;
5765 	}
5766 
5767 	if ((priv->config & CFG_STATIC_BSSID) &&
5768 	    !ether_addr_equal(network->bssid, priv->bssid)) {
5769 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5770 				network->ssid_len, network->ssid,
5771 				network->bssid, priv->bssid);
5772 		return 0;
5773 	}
5774 
5775 	/* Filter out any incompatible freq / mode combinations */
5776 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5777 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5778 				network->ssid_len, network->ssid,
5779 				network->bssid);
5780 		return 0;
5781 	}
5782 
5783 	/* Filter out invalid channel in current GEO */
5784 	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5785 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5786 				network->ssid_len, network->ssid,
5787 				network->bssid);
5788 		return 0;
5789 	}
5790 
5791 	/* Ensure that the rates supported by the driver are compatible with
5792 	 * this AP, including verification of basic rates (mandatory) */
5793 	if (!ipw_compatible_rates(priv, network, &rates)) {
5794 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5795 				network->ssid_len, network->ssid,
5796 				network->bssid);
5797 		return 0;
5798 	}
5799 
5800 	if (rates.num_rates == 0) {
5801 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5802 				network->ssid_len, network->ssid,
5803 				network->bssid);
5804 		return 0;
5805 	}
5806 
5807 	/* TODO: Perform any further minimal comparititive tests.  We do not
5808 	 * want to put too much policy logic here; intelligent scan selection
5809 	 * should occur within a generic IEEE 802.11 user space tool.  */
5810 
5811 	/* Set up 'new' AP to this network */
5812 	ipw_copy_rates(&match->rates, &rates);
5813 	match->network = network;
5814 
5815 	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5816 			network->ssid_len, network->ssid, network->bssid);
5817 
5818 	return 1;
5819 }
5820 
5821 static void ipw_adhoc_create(struct ipw_priv *priv,
5822 			     struct libipw_network *network)
5823 {
5824 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5825 	int i;
5826 
5827 	/*
5828 	 * For the purposes of scanning, we can set our wireless mode
5829 	 * to trigger scans across combinations of bands, but when it
5830 	 * comes to creating a new ad-hoc network, we have tell the FW
5831 	 * exactly which band to use.
5832 	 *
5833 	 * We also have the possibility of an invalid channel for the
5834 	 * chossen band.  Attempting to create a new ad-hoc network
5835 	 * with an invalid channel for wireless mode will trigger a
5836 	 * FW fatal error.
5837 	 *
5838 	 */
5839 	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5840 	case LIBIPW_52GHZ_BAND:
5841 		network->mode = IEEE_A;
5842 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5843 		BUG_ON(i == -1);
5844 		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5845 			IPW_WARNING("Overriding invalid channel\n");
5846 			priv->channel = geo->a[0].channel;
5847 		}
5848 		break;
5849 
5850 	case LIBIPW_24GHZ_BAND:
5851 		if (priv->ieee->mode & IEEE_G)
5852 			network->mode = IEEE_G;
5853 		else
5854 			network->mode = IEEE_B;
5855 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5856 		BUG_ON(i == -1);
5857 		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5858 			IPW_WARNING("Overriding invalid channel\n");
5859 			priv->channel = geo->bg[0].channel;
5860 		}
5861 		break;
5862 
5863 	default:
5864 		IPW_WARNING("Overriding invalid channel\n");
5865 		if (priv->ieee->mode & IEEE_A) {
5866 			network->mode = IEEE_A;
5867 			priv->channel = geo->a[0].channel;
5868 		} else if (priv->ieee->mode & IEEE_G) {
5869 			network->mode = IEEE_G;
5870 			priv->channel = geo->bg[0].channel;
5871 		} else {
5872 			network->mode = IEEE_B;
5873 			priv->channel = geo->bg[0].channel;
5874 		}
5875 		break;
5876 	}
5877 
5878 	network->channel = priv->channel;
5879 	priv->config |= CFG_ADHOC_PERSIST;
5880 	ipw_create_bssid(priv, network->bssid);
5881 	network->ssid_len = priv->essid_len;
5882 	memcpy(network->ssid, priv->essid, priv->essid_len);
5883 	memset(&network->stats, 0, sizeof(network->stats));
5884 	network->capability = WLAN_CAPABILITY_IBSS;
5885 	if (!(priv->config & CFG_PREAMBLE_LONG))
5886 		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5887 	if (priv->capability & CAP_PRIVACY_ON)
5888 		network->capability |= WLAN_CAPABILITY_PRIVACY;
5889 	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5890 	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5891 	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5892 	memcpy(network->rates_ex,
5893 	       &priv->rates.supported_rates[network->rates_len],
5894 	       network->rates_ex_len);
5895 	network->last_scanned = 0;
5896 	network->flags = 0;
5897 	network->last_associate = 0;
5898 	network->time_stamp[0] = 0;
5899 	network->time_stamp[1] = 0;
5900 	network->beacon_interval = 100;	/* Default */
5901 	network->listen_interval = 10;	/* Default */
5902 	network->atim_window = 0;	/* Default */
5903 	network->wpa_ie_len = 0;
5904 	network->rsn_ie_len = 0;
5905 }
5906 
5907 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5908 {
5909 	struct ipw_tgi_tx_key key;
5910 
5911 	if (!(priv->ieee->sec.flags & (1 << index)))
5912 		return;
5913 
5914 	key.key_id = index;
5915 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5916 	key.security_type = type;
5917 	key.station_index = 0;	/* always 0 for BSS */
5918 	key.flags = 0;
5919 	/* 0 for new key; previous value of counter (after fatal error) */
5920 	key.tx_counter[0] = cpu_to_le32(0);
5921 	key.tx_counter[1] = cpu_to_le32(0);
5922 
5923 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5924 }
5925 
5926 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5927 {
5928 	struct ipw_wep_key key;
5929 	int i;
5930 
5931 	key.cmd_id = DINO_CMD_WEP_KEY;
5932 	key.seq_num = 0;
5933 
5934 	/* Note: AES keys cannot be set for multiple times.
5935 	 * Only set it at the first time. */
5936 	for (i = 0; i < 4; i++) {
5937 		key.key_index = i | type;
5938 		if (!(priv->ieee->sec.flags & (1 << i))) {
5939 			key.key_size = 0;
5940 			continue;
5941 		}
5942 
5943 		key.key_size = priv->ieee->sec.key_sizes[i];
5944 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5945 
5946 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5947 	}
5948 }
5949 
5950 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5951 {
5952 	if (priv->ieee->host_encrypt)
5953 		return;
5954 
5955 	switch (level) {
5956 	case SEC_LEVEL_3:
5957 		priv->sys_config.disable_unicast_decryption = 0;
5958 		priv->ieee->host_decrypt = 0;
5959 		break;
5960 	case SEC_LEVEL_2:
5961 		priv->sys_config.disable_unicast_decryption = 1;
5962 		priv->ieee->host_decrypt = 1;
5963 		break;
5964 	case SEC_LEVEL_1:
5965 		priv->sys_config.disable_unicast_decryption = 0;
5966 		priv->ieee->host_decrypt = 0;
5967 		break;
5968 	case SEC_LEVEL_0:
5969 		priv->sys_config.disable_unicast_decryption = 1;
5970 		break;
5971 	default:
5972 		break;
5973 	}
5974 }
5975 
5976 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5977 {
5978 	if (priv->ieee->host_encrypt)
5979 		return;
5980 
5981 	switch (level) {
5982 	case SEC_LEVEL_3:
5983 		priv->sys_config.disable_multicast_decryption = 0;
5984 		break;
5985 	case SEC_LEVEL_2:
5986 		priv->sys_config.disable_multicast_decryption = 1;
5987 		break;
5988 	case SEC_LEVEL_1:
5989 		priv->sys_config.disable_multicast_decryption = 0;
5990 		break;
5991 	case SEC_LEVEL_0:
5992 		priv->sys_config.disable_multicast_decryption = 1;
5993 		break;
5994 	default:
5995 		break;
5996 	}
5997 }
5998 
5999 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6000 {
6001 	switch (priv->ieee->sec.level) {
6002 	case SEC_LEVEL_3:
6003 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6004 			ipw_send_tgi_tx_key(priv,
6005 					    DCT_FLAG_EXT_SECURITY_CCM,
6006 					    priv->ieee->sec.active_key);
6007 
6008 		if (!priv->ieee->host_mc_decrypt)
6009 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6010 		break;
6011 	case SEC_LEVEL_2:
6012 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6013 			ipw_send_tgi_tx_key(priv,
6014 					    DCT_FLAG_EXT_SECURITY_TKIP,
6015 					    priv->ieee->sec.active_key);
6016 		break;
6017 	case SEC_LEVEL_1:
6018 		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6019 		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6020 		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6021 		break;
6022 	case SEC_LEVEL_0:
6023 	default:
6024 		break;
6025 	}
6026 }
6027 
6028 static void ipw_adhoc_check(void *data)
6029 {
6030 	struct ipw_priv *priv = data;
6031 
6032 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6033 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6034 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6035 			  IPW_DL_STATE | IPW_DL_ASSOC,
6036 			  "Missed beacon: %d - disassociate\n",
6037 			  priv->missed_adhoc_beacons);
6038 		ipw_remove_current_network(priv);
6039 		ipw_disassociate(priv);
6040 		return;
6041 	}
6042 
6043 	schedule_delayed_work(&priv->adhoc_check,
6044 			      le16_to_cpu(priv->assoc_request.beacon_interval));
6045 }
6046 
6047 static void ipw_bg_adhoc_check(struct work_struct *work)
6048 {
6049 	struct ipw_priv *priv =
6050 		container_of(work, struct ipw_priv, adhoc_check.work);
6051 	mutex_lock(&priv->mutex);
6052 	ipw_adhoc_check(priv);
6053 	mutex_unlock(&priv->mutex);
6054 }
6055 
6056 static void ipw_debug_config(struct ipw_priv *priv)
6057 {
6058 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6059 		       "[CFG 0x%08X]\n", priv->config);
6060 	if (priv->config & CFG_STATIC_CHANNEL)
6061 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6062 	else
6063 		IPW_DEBUG_INFO("Channel unlocked.\n");
6064 	if (priv->config & CFG_STATIC_ESSID)
6065 		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6066 			       priv->essid_len, priv->essid);
6067 	else
6068 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6069 	if (priv->config & CFG_STATIC_BSSID)
6070 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6071 	else
6072 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6073 	if (priv->capability & CAP_PRIVACY_ON)
6074 		IPW_DEBUG_INFO("PRIVACY on\n");
6075 	else
6076 		IPW_DEBUG_INFO("PRIVACY off\n");
6077 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6078 }
6079 
6080 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6081 {
6082 	/* TODO: Verify that this works... */
6083 	struct ipw_fixed_rate fr;
6084 	u32 reg;
6085 	u16 mask = 0;
6086 	u16 new_tx_rates = priv->rates_mask;
6087 
6088 	/* Identify 'current FW band' and match it with the fixed
6089 	 * Tx rates */
6090 
6091 	switch (priv->ieee->freq_band) {
6092 	case LIBIPW_52GHZ_BAND:	/* A only */
6093 		/* IEEE_A */
6094 		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6095 			/* Invalid fixed rate mask */
6096 			IPW_DEBUG_WX
6097 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6098 			new_tx_rates = 0;
6099 			break;
6100 		}
6101 
6102 		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6103 		break;
6104 
6105 	default:		/* 2.4Ghz or Mixed */
6106 		/* IEEE_B */
6107 		if (mode == IEEE_B) {
6108 			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6109 				/* Invalid fixed rate mask */
6110 				IPW_DEBUG_WX
6111 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6112 				new_tx_rates = 0;
6113 			}
6114 			break;
6115 		}
6116 
6117 		/* IEEE_G */
6118 		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6119 				    LIBIPW_OFDM_RATES_MASK)) {
6120 			/* Invalid fixed rate mask */
6121 			IPW_DEBUG_WX
6122 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6123 			new_tx_rates = 0;
6124 			break;
6125 		}
6126 
6127 		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6128 			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6129 			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6130 		}
6131 
6132 		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6133 			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6134 			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6135 		}
6136 
6137 		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6138 			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6139 			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6140 		}
6141 
6142 		new_tx_rates |= mask;
6143 		break;
6144 	}
6145 
6146 	fr.tx_rates = cpu_to_le16(new_tx_rates);
6147 
6148 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6149 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6150 }
6151 
6152 static void ipw_abort_scan(struct ipw_priv *priv)
6153 {
6154 	int err;
6155 
6156 	if (priv->status & STATUS_SCAN_ABORTING) {
6157 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6158 		return;
6159 	}
6160 	priv->status |= STATUS_SCAN_ABORTING;
6161 
6162 	err = ipw_send_scan_abort(priv);
6163 	if (err)
6164 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6165 }
6166 
6167 static void ipw_add_scan_channels(struct ipw_priv *priv,
6168 				  struct ipw_scan_request_ext *scan,
6169 				  int scan_type)
6170 {
6171 	int channel_index = 0;
6172 	const struct libipw_geo *geo;
6173 	int i;
6174 
6175 	geo = libipw_get_geo(priv->ieee);
6176 
6177 	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6178 		int start = channel_index;
6179 		for (i = 0; i < geo->a_channels; i++) {
6180 			if ((priv->status & STATUS_ASSOCIATED) &&
6181 			    geo->a[i].channel == priv->channel)
6182 				continue;
6183 			channel_index++;
6184 			scan->channels_list[channel_index] = geo->a[i].channel;
6185 			ipw_set_scan_type(scan, channel_index,
6186 					  geo->a[i].
6187 					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6188 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6189 					  scan_type);
6190 		}
6191 
6192 		if (start != channel_index) {
6193 			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6194 			    (channel_index - start);
6195 			channel_index++;
6196 		}
6197 	}
6198 
6199 	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6200 		int start = channel_index;
6201 		if (priv->config & CFG_SPEED_SCAN) {
6202 			int index;
6203 			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6204 				/* nop out the list */
6205 				[0] = 0
6206 			};
6207 
6208 			u8 channel;
6209 			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6210 				channel =
6211 				    priv->speed_scan[priv->speed_scan_pos];
6212 				if (channel == 0) {
6213 					priv->speed_scan_pos = 0;
6214 					channel = priv->speed_scan[0];
6215 				}
6216 				if ((priv->status & STATUS_ASSOCIATED) &&
6217 				    channel == priv->channel) {
6218 					priv->speed_scan_pos++;
6219 					continue;
6220 				}
6221 
6222 				/* If this channel has already been
6223 				 * added in scan, break from loop
6224 				 * and this will be the first channel
6225 				 * in the next scan.
6226 				 */
6227 				if (channels[channel - 1] != 0)
6228 					break;
6229 
6230 				channels[channel - 1] = 1;
6231 				priv->speed_scan_pos++;
6232 				channel_index++;
6233 				scan->channels_list[channel_index] = channel;
6234 				index =
6235 				    libipw_channel_to_index(priv->ieee, channel);
6236 				ipw_set_scan_type(scan, channel_index,
6237 						  geo->bg[index].
6238 						  flags &
6239 						  LIBIPW_CH_PASSIVE_ONLY ?
6240 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6241 						  : scan_type);
6242 			}
6243 		} else {
6244 			for (i = 0; i < geo->bg_channels; i++) {
6245 				if ((priv->status & STATUS_ASSOCIATED) &&
6246 				    geo->bg[i].channel == priv->channel)
6247 					continue;
6248 				channel_index++;
6249 				scan->channels_list[channel_index] =
6250 				    geo->bg[i].channel;
6251 				ipw_set_scan_type(scan, channel_index,
6252 						  geo->bg[i].
6253 						  flags &
6254 						  LIBIPW_CH_PASSIVE_ONLY ?
6255 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6256 						  : scan_type);
6257 			}
6258 		}
6259 
6260 		if (start != channel_index) {
6261 			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6262 			    (channel_index - start);
6263 		}
6264 	}
6265 }
6266 
6267 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6268 {
6269 	/* staying on passive channels longer than the DTIM interval during a
6270 	 * scan, while associated, causes the firmware to cancel the scan
6271 	 * without notification. Hence, don't stay on passive channels longer
6272 	 * than the beacon interval.
6273 	 */
6274 	if (priv->status & STATUS_ASSOCIATED
6275 	    && priv->assoc_network->beacon_interval > 10)
6276 		return priv->assoc_network->beacon_interval - 10;
6277 	else
6278 		return 120;
6279 }
6280 
6281 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6282 {
6283 	struct ipw_scan_request_ext scan;
6284 	int err = 0, scan_type;
6285 
6286 	if (!(priv->status & STATUS_INIT) ||
6287 	    (priv->status & STATUS_EXIT_PENDING))
6288 		return 0;
6289 
6290 	mutex_lock(&priv->mutex);
6291 
6292 	if (direct && (priv->direct_scan_ssid_len == 0)) {
6293 		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6294 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6295 		goto done;
6296 	}
6297 
6298 	if (priv->status & STATUS_SCANNING) {
6299 		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6300 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6301 					STATUS_SCAN_PENDING;
6302 		goto done;
6303 	}
6304 
6305 	if (!(priv->status & STATUS_SCAN_FORCED) &&
6306 	    priv->status & STATUS_SCAN_ABORTING) {
6307 		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6308 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6309 					STATUS_SCAN_PENDING;
6310 		goto done;
6311 	}
6312 
6313 	if (priv->status & STATUS_RF_KILL_MASK) {
6314 		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6315 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6316 					STATUS_SCAN_PENDING;
6317 		goto done;
6318 	}
6319 
6320 	memset(&scan, 0, sizeof(scan));
6321 	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6322 
6323 	if (type == IW_SCAN_TYPE_PASSIVE) {
6324 		IPW_DEBUG_WX("use passive scanning\n");
6325 		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6326 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6327 			cpu_to_le16(ipw_passive_dwell_time(priv));
6328 		ipw_add_scan_channels(priv, &scan, scan_type);
6329 		goto send_request;
6330 	}
6331 
6332 	/* Use active scan by default. */
6333 	if (priv->config & CFG_SPEED_SCAN)
6334 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6335 			cpu_to_le16(30);
6336 	else
6337 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6338 			cpu_to_le16(20);
6339 
6340 	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6341 		cpu_to_le16(20);
6342 
6343 	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6344 		cpu_to_le16(ipw_passive_dwell_time(priv));
6345 	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6346 
6347 #ifdef CONFIG_IPW2200_MONITOR
6348 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6349 		u8 channel;
6350 		u8 band = 0;
6351 
6352 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6353 		case LIBIPW_52GHZ_BAND:
6354 			band = (u8) (IPW_A_MODE << 6) | 1;
6355 			channel = priv->channel;
6356 			break;
6357 
6358 		case LIBIPW_24GHZ_BAND:
6359 			band = (u8) (IPW_B_MODE << 6) | 1;
6360 			channel = priv->channel;
6361 			break;
6362 
6363 		default:
6364 			band = (u8) (IPW_B_MODE << 6) | 1;
6365 			channel = 9;
6366 			break;
6367 		}
6368 
6369 		scan.channels_list[0] = band;
6370 		scan.channels_list[1] = channel;
6371 		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6372 
6373 		/* NOTE:  The card will sit on this channel for this time
6374 		 * period.  Scan aborts are timing sensitive and frequently
6375 		 * result in firmware restarts.  As such, it is best to
6376 		 * set a small dwell_time here and just keep re-issuing
6377 		 * scans.  Otherwise fast channel hopping will not actually
6378 		 * hop channels.
6379 		 *
6380 		 * TODO: Move SPEED SCAN support to all modes and bands */
6381 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6382 			cpu_to_le16(2000);
6383 	} else {
6384 #endif				/* CONFIG_IPW2200_MONITOR */
6385 		/* Honor direct scans first, otherwise if we are roaming make
6386 		 * this a direct scan for the current network.  Finally,
6387 		 * ensure that every other scan is a fast channel hop scan */
6388 		if (direct) {
6389 			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6390 			                    priv->direct_scan_ssid_len);
6391 			if (err) {
6392 				IPW_DEBUG_HC("Attempt to send SSID command  "
6393 					     "failed\n");
6394 				goto done;
6395 			}
6396 
6397 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6398 		} else if ((priv->status & STATUS_ROAMING)
6399 			   || (!(priv->status & STATUS_ASSOCIATED)
6400 			       && (priv->config & CFG_STATIC_ESSID)
6401 			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6402 			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6403 			if (err) {
6404 				IPW_DEBUG_HC("Attempt to send SSID command "
6405 					     "failed.\n");
6406 				goto done;
6407 			}
6408 
6409 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6410 		} else
6411 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6412 
6413 		ipw_add_scan_channels(priv, &scan, scan_type);
6414 #ifdef CONFIG_IPW2200_MONITOR
6415 	}
6416 #endif
6417 
6418 send_request:
6419 	err = ipw_send_scan_request_ext(priv, &scan);
6420 	if (err) {
6421 		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6422 		goto done;
6423 	}
6424 
6425 	priv->status |= STATUS_SCANNING;
6426 	if (direct) {
6427 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6428 		priv->direct_scan_ssid_len = 0;
6429 	} else
6430 		priv->status &= ~STATUS_SCAN_PENDING;
6431 
6432 	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6433 done:
6434 	mutex_unlock(&priv->mutex);
6435 	return err;
6436 }
6437 
6438 static void ipw_request_passive_scan(struct work_struct *work)
6439 {
6440 	struct ipw_priv *priv =
6441 		container_of(work, struct ipw_priv, request_passive_scan.work);
6442 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6443 }
6444 
6445 static void ipw_request_scan(struct work_struct *work)
6446 {
6447 	struct ipw_priv *priv =
6448 		container_of(work, struct ipw_priv, request_scan.work);
6449 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6450 }
6451 
6452 static void ipw_request_direct_scan(struct work_struct *work)
6453 {
6454 	struct ipw_priv *priv =
6455 		container_of(work, struct ipw_priv, request_direct_scan.work);
6456 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6457 }
6458 
6459 static void ipw_bg_abort_scan(struct work_struct *work)
6460 {
6461 	struct ipw_priv *priv =
6462 		container_of(work, struct ipw_priv, abort_scan);
6463 	mutex_lock(&priv->mutex);
6464 	ipw_abort_scan(priv);
6465 	mutex_unlock(&priv->mutex);
6466 }
6467 
6468 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6469 {
6470 	/* This is called when wpa_supplicant loads and closes the driver
6471 	 * interface. */
6472 	priv->ieee->wpa_enabled = value;
6473 	return 0;
6474 }
6475 
6476 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6477 {
6478 	struct libipw_device *ieee = priv->ieee;
6479 	struct libipw_security sec = {
6480 		.flags = SEC_AUTH_MODE,
6481 	};
6482 	int ret = 0;
6483 
6484 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6485 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6486 		ieee->open_wep = 0;
6487 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6488 		sec.auth_mode = WLAN_AUTH_OPEN;
6489 		ieee->open_wep = 1;
6490 	} else if (value & IW_AUTH_ALG_LEAP) {
6491 		sec.auth_mode = WLAN_AUTH_LEAP;
6492 		ieee->open_wep = 1;
6493 	} else
6494 		return -EINVAL;
6495 
6496 	if (ieee->set_security)
6497 		ieee->set_security(ieee->dev, &sec);
6498 	else
6499 		ret = -EOPNOTSUPP;
6500 
6501 	return ret;
6502 }
6503 
6504 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6505 				int wpa_ie_len)
6506 {
6507 	/* make sure WPA is enabled */
6508 	ipw_wpa_enable(priv, 1);
6509 }
6510 
6511 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6512 			    char *capabilities, int length)
6513 {
6514 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6515 
6516 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6517 				capabilities);
6518 }
6519 
6520 /*
6521  * WE-18 support
6522  */
6523 
6524 /* SIOCSIWGENIE */
6525 static int ipw_wx_set_genie(struct net_device *dev,
6526 			    struct iw_request_info *info,
6527 			    union iwreq_data *wrqu, char *extra)
6528 {
6529 	struct ipw_priv *priv = libipw_priv(dev);
6530 	struct libipw_device *ieee = priv->ieee;
6531 	u8 *buf;
6532 	int err = 0;
6533 
6534 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6535 	    (wrqu->data.length && extra == NULL))
6536 		return -EINVAL;
6537 
6538 	if (wrqu->data.length) {
6539 		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6540 		if (buf == NULL) {
6541 			err = -ENOMEM;
6542 			goto out;
6543 		}
6544 
6545 		kfree(ieee->wpa_ie);
6546 		ieee->wpa_ie = buf;
6547 		ieee->wpa_ie_len = wrqu->data.length;
6548 	} else {
6549 		kfree(ieee->wpa_ie);
6550 		ieee->wpa_ie = NULL;
6551 		ieee->wpa_ie_len = 0;
6552 	}
6553 
6554 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6555       out:
6556 	return err;
6557 }
6558 
6559 /* SIOCGIWGENIE */
6560 static int ipw_wx_get_genie(struct net_device *dev,
6561 			    struct iw_request_info *info,
6562 			    union iwreq_data *wrqu, char *extra)
6563 {
6564 	struct ipw_priv *priv = libipw_priv(dev);
6565 	struct libipw_device *ieee = priv->ieee;
6566 	int err = 0;
6567 
6568 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6569 		wrqu->data.length = 0;
6570 		goto out;
6571 	}
6572 
6573 	if (wrqu->data.length < ieee->wpa_ie_len) {
6574 		err = -E2BIG;
6575 		goto out;
6576 	}
6577 
6578 	wrqu->data.length = ieee->wpa_ie_len;
6579 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6580 
6581       out:
6582 	return err;
6583 }
6584 
6585 static int wext_cipher2level(int cipher)
6586 {
6587 	switch (cipher) {
6588 	case IW_AUTH_CIPHER_NONE:
6589 		return SEC_LEVEL_0;
6590 	case IW_AUTH_CIPHER_WEP40:
6591 	case IW_AUTH_CIPHER_WEP104:
6592 		return SEC_LEVEL_1;
6593 	case IW_AUTH_CIPHER_TKIP:
6594 		return SEC_LEVEL_2;
6595 	case IW_AUTH_CIPHER_CCMP:
6596 		return SEC_LEVEL_3;
6597 	default:
6598 		return -1;
6599 	}
6600 }
6601 
6602 /* SIOCSIWAUTH */
6603 static int ipw_wx_set_auth(struct net_device *dev,
6604 			   struct iw_request_info *info,
6605 			   union iwreq_data *wrqu, char *extra)
6606 {
6607 	struct ipw_priv *priv = libipw_priv(dev);
6608 	struct libipw_device *ieee = priv->ieee;
6609 	struct iw_param *param = &wrqu->param;
6610 	struct lib80211_crypt_data *crypt;
6611 	unsigned long flags;
6612 	int ret = 0;
6613 
6614 	switch (param->flags & IW_AUTH_INDEX) {
6615 	case IW_AUTH_WPA_VERSION:
6616 		break;
6617 	case IW_AUTH_CIPHER_PAIRWISE:
6618 		ipw_set_hw_decrypt_unicast(priv,
6619 					   wext_cipher2level(param->value));
6620 		break;
6621 	case IW_AUTH_CIPHER_GROUP:
6622 		ipw_set_hw_decrypt_multicast(priv,
6623 					     wext_cipher2level(param->value));
6624 		break;
6625 	case IW_AUTH_KEY_MGMT:
6626 		/*
6627 		 * ipw2200 does not use these parameters
6628 		 */
6629 		break;
6630 
6631 	case IW_AUTH_TKIP_COUNTERMEASURES:
6632 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6633 		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6634 			break;
6635 
6636 		flags = crypt->ops->get_flags(crypt->priv);
6637 
6638 		if (param->value)
6639 			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6640 		else
6641 			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6642 
6643 		crypt->ops->set_flags(flags, crypt->priv);
6644 
6645 		break;
6646 
6647 	case IW_AUTH_DROP_UNENCRYPTED:{
6648 			/* HACK:
6649 			 *
6650 			 * wpa_supplicant calls set_wpa_enabled when the driver
6651 			 * is loaded and unloaded, regardless of if WPA is being
6652 			 * used.  No other calls are made which can be used to
6653 			 * determine if encryption will be used or not prior to
6654 			 * association being expected.  If encryption is not being
6655 			 * used, drop_unencrypted is set to false, else true -- we
6656 			 * can use this to determine if the CAP_PRIVACY_ON bit should
6657 			 * be set.
6658 			 */
6659 			struct libipw_security sec = {
6660 				.flags = SEC_ENABLED,
6661 				.enabled = param->value,
6662 			};
6663 			priv->ieee->drop_unencrypted = param->value;
6664 			/* We only change SEC_LEVEL for open mode. Others
6665 			 * are set by ipw_wpa_set_encryption.
6666 			 */
6667 			if (!param->value) {
6668 				sec.flags |= SEC_LEVEL;
6669 				sec.level = SEC_LEVEL_0;
6670 			} else {
6671 				sec.flags |= SEC_LEVEL;
6672 				sec.level = SEC_LEVEL_1;
6673 			}
6674 			if (priv->ieee->set_security)
6675 				priv->ieee->set_security(priv->ieee->dev, &sec);
6676 			break;
6677 		}
6678 
6679 	case IW_AUTH_80211_AUTH_ALG:
6680 		ret = ipw_wpa_set_auth_algs(priv, param->value);
6681 		break;
6682 
6683 	case IW_AUTH_WPA_ENABLED:
6684 		ret = ipw_wpa_enable(priv, param->value);
6685 		ipw_disassociate(priv);
6686 		break;
6687 
6688 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6689 		ieee->ieee802_1x = param->value;
6690 		break;
6691 
6692 	case IW_AUTH_PRIVACY_INVOKED:
6693 		ieee->privacy_invoked = param->value;
6694 		break;
6695 
6696 	default:
6697 		return -EOPNOTSUPP;
6698 	}
6699 	return ret;
6700 }
6701 
6702 /* SIOCGIWAUTH */
6703 static int ipw_wx_get_auth(struct net_device *dev,
6704 			   struct iw_request_info *info,
6705 			   union iwreq_data *wrqu, char *extra)
6706 {
6707 	struct ipw_priv *priv = libipw_priv(dev);
6708 	struct libipw_device *ieee = priv->ieee;
6709 	struct lib80211_crypt_data *crypt;
6710 	struct iw_param *param = &wrqu->param;
6711 
6712 	switch (param->flags & IW_AUTH_INDEX) {
6713 	case IW_AUTH_WPA_VERSION:
6714 	case IW_AUTH_CIPHER_PAIRWISE:
6715 	case IW_AUTH_CIPHER_GROUP:
6716 	case IW_AUTH_KEY_MGMT:
6717 		/*
6718 		 * wpa_supplicant will control these internally
6719 		 */
6720 		return -EOPNOTSUPP;
6721 
6722 	case IW_AUTH_TKIP_COUNTERMEASURES:
6723 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6724 		if (!crypt || !crypt->ops->get_flags)
6725 			break;
6726 
6727 		param->value = (crypt->ops->get_flags(crypt->priv) &
6728 				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6729 
6730 		break;
6731 
6732 	case IW_AUTH_DROP_UNENCRYPTED:
6733 		param->value = ieee->drop_unencrypted;
6734 		break;
6735 
6736 	case IW_AUTH_80211_AUTH_ALG:
6737 		param->value = ieee->sec.auth_mode;
6738 		break;
6739 
6740 	case IW_AUTH_WPA_ENABLED:
6741 		param->value = ieee->wpa_enabled;
6742 		break;
6743 
6744 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6745 		param->value = ieee->ieee802_1x;
6746 		break;
6747 
6748 	case IW_AUTH_ROAMING_CONTROL:
6749 	case IW_AUTH_PRIVACY_INVOKED:
6750 		param->value = ieee->privacy_invoked;
6751 		break;
6752 
6753 	default:
6754 		return -EOPNOTSUPP;
6755 	}
6756 	return 0;
6757 }
6758 
6759 /* SIOCSIWENCODEEXT */
6760 static int ipw_wx_set_encodeext(struct net_device *dev,
6761 				struct iw_request_info *info,
6762 				union iwreq_data *wrqu, char *extra)
6763 {
6764 	struct ipw_priv *priv = libipw_priv(dev);
6765 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6766 
6767 	if (hwcrypto) {
6768 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6769 			/* IPW HW can't build TKIP MIC,
6770 			   host decryption still needed */
6771 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6772 				priv->ieee->host_mc_decrypt = 1;
6773 			else {
6774 				priv->ieee->host_encrypt = 0;
6775 				priv->ieee->host_encrypt_msdu = 1;
6776 				priv->ieee->host_decrypt = 1;
6777 			}
6778 		} else {
6779 			priv->ieee->host_encrypt = 0;
6780 			priv->ieee->host_encrypt_msdu = 0;
6781 			priv->ieee->host_decrypt = 0;
6782 			priv->ieee->host_mc_decrypt = 0;
6783 		}
6784 	}
6785 
6786 	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6787 }
6788 
6789 /* SIOCGIWENCODEEXT */
6790 static int ipw_wx_get_encodeext(struct net_device *dev,
6791 				struct iw_request_info *info,
6792 				union iwreq_data *wrqu, char *extra)
6793 {
6794 	struct ipw_priv *priv = libipw_priv(dev);
6795 	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6796 }
6797 
6798 /* SIOCSIWMLME */
6799 static int ipw_wx_set_mlme(struct net_device *dev,
6800 			   struct iw_request_info *info,
6801 			   union iwreq_data *wrqu, char *extra)
6802 {
6803 	struct ipw_priv *priv = libipw_priv(dev);
6804 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6805 	__le16 reason;
6806 
6807 	reason = cpu_to_le16(mlme->reason_code);
6808 
6809 	switch (mlme->cmd) {
6810 	case IW_MLME_DEAUTH:
6811 		/* silently ignore */
6812 		break;
6813 
6814 	case IW_MLME_DISASSOC:
6815 		ipw_disassociate(priv);
6816 		break;
6817 
6818 	default:
6819 		return -EOPNOTSUPP;
6820 	}
6821 	return 0;
6822 }
6823 
6824 #ifdef CONFIG_IPW2200_QOS
6825 
6826 /* QoS */
6827 /*
6828 * get the modulation type of the current network or
6829 * the card current mode
6830 */
6831 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6832 {
6833 	u8 mode = 0;
6834 
6835 	if (priv->status & STATUS_ASSOCIATED) {
6836 		unsigned long flags;
6837 
6838 		spin_lock_irqsave(&priv->ieee->lock, flags);
6839 		mode = priv->assoc_network->mode;
6840 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6841 	} else {
6842 		mode = priv->ieee->mode;
6843 	}
6844 	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6845 	return mode;
6846 }
6847 
6848 /*
6849 * Handle management frame beacon and probe response
6850 */
6851 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6852 					 int active_network,
6853 					 struct libipw_network *network)
6854 {
6855 	u32 size = sizeof(struct libipw_qos_parameters);
6856 
6857 	if (network->capability & WLAN_CAPABILITY_IBSS)
6858 		network->qos_data.active = network->qos_data.supported;
6859 
6860 	if (network->flags & NETWORK_HAS_QOS_MASK) {
6861 		if (active_network &&
6862 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6863 			network->qos_data.active = network->qos_data.supported;
6864 
6865 		if ((network->qos_data.active == 1) && (active_network == 1) &&
6866 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6867 		    (network->qos_data.old_param_count !=
6868 		     network->qos_data.param_count)) {
6869 			network->qos_data.old_param_count =
6870 			    network->qos_data.param_count;
6871 			schedule_work(&priv->qos_activate);
6872 			IPW_DEBUG_QOS("QoS parameters change call "
6873 				      "qos_activate\n");
6874 		}
6875 	} else {
6876 		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6877 			memcpy(&network->qos_data.parameters,
6878 			       &def_parameters_CCK, size);
6879 		else
6880 			memcpy(&network->qos_data.parameters,
6881 			       &def_parameters_OFDM, size);
6882 
6883 		if ((network->qos_data.active == 1) && (active_network == 1)) {
6884 			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6885 			schedule_work(&priv->qos_activate);
6886 		}
6887 
6888 		network->qos_data.active = 0;
6889 		network->qos_data.supported = 0;
6890 	}
6891 	if ((priv->status & STATUS_ASSOCIATED) &&
6892 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6893 		if (!ether_addr_equal(network->bssid, priv->bssid))
6894 			if (network->capability & WLAN_CAPABILITY_IBSS)
6895 				if ((network->ssid_len ==
6896 				     priv->assoc_network->ssid_len) &&
6897 				    !memcmp(network->ssid,
6898 					    priv->assoc_network->ssid,
6899 					    network->ssid_len)) {
6900 					schedule_work(&priv->merge_networks);
6901 				}
6902 	}
6903 
6904 	return 0;
6905 }
6906 
6907 /*
6908 * This function set up the firmware to support QoS. It sends
6909 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6910 */
6911 static int ipw_qos_activate(struct ipw_priv *priv,
6912 			    struct libipw_qos_data *qos_network_data)
6913 {
6914 	int err;
6915 	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6916 	struct libipw_qos_parameters *active_one = NULL;
6917 	u32 size = sizeof(struct libipw_qos_parameters);
6918 	u32 burst_duration;
6919 	int i;
6920 	u8 type;
6921 
6922 	type = ipw_qos_current_mode(priv);
6923 
6924 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6925 	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6926 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6927 	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6928 
6929 	if (qos_network_data == NULL) {
6930 		if (type == IEEE_B) {
6931 			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6932 			active_one = &def_parameters_CCK;
6933 		} else
6934 			active_one = &def_parameters_OFDM;
6935 
6936 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6937 		burst_duration = ipw_qos_get_burst_duration(priv);
6938 		for (i = 0; i < QOS_QUEUE_NUM; i++)
6939 			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6940 			    cpu_to_le16(burst_duration);
6941 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6942 		if (type == IEEE_B) {
6943 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6944 				      type);
6945 			if (priv->qos_data.qos_enable == 0)
6946 				active_one = &def_parameters_CCK;
6947 			else
6948 				active_one = priv->qos_data.def_qos_parm_CCK;
6949 		} else {
6950 			if (priv->qos_data.qos_enable == 0)
6951 				active_one = &def_parameters_OFDM;
6952 			else
6953 				active_one = priv->qos_data.def_qos_parm_OFDM;
6954 		}
6955 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6956 	} else {
6957 		unsigned long flags;
6958 		int active;
6959 
6960 		spin_lock_irqsave(&priv->ieee->lock, flags);
6961 		active_one = &(qos_network_data->parameters);
6962 		qos_network_data->old_param_count =
6963 		    qos_network_data->param_count;
6964 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6965 		active = qos_network_data->supported;
6966 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6967 
6968 		if (active == 0) {
6969 			burst_duration = ipw_qos_get_burst_duration(priv);
6970 			for (i = 0; i < QOS_QUEUE_NUM; i++)
6971 				qos_parameters[QOS_PARAM_SET_ACTIVE].
6972 				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6973 		}
6974 	}
6975 
6976 	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6977 	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6978 	if (err)
6979 		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6980 
6981 	return err;
6982 }
6983 
6984 /*
6985 * send IPW_CMD_WME_INFO to the firmware
6986 */
6987 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6988 {
6989 	int ret = 0;
6990 	struct libipw_qos_information_element qos_info;
6991 
6992 	if (priv == NULL)
6993 		return -1;
6994 
6995 	qos_info.elementID = QOS_ELEMENT_ID;
6996 	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
6997 
6998 	qos_info.version = QOS_VERSION_1;
6999 	qos_info.ac_info = 0;
7000 
7001 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7002 	qos_info.qui_type = QOS_OUI_TYPE;
7003 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7004 
7005 	ret = ipw_send_qos_info_command(priv, &qos_info);
7006 	if (ret != 0) {
7007 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7008 	}
7009 	return ret;
7010 }
7011 
7012 /*
7013 * Set the QoS parameter with the association request structure
7014 */
7015 static int ipw_qos_association(struct ipw_priv *priv,
7016 			       struct libipw_network *network)
7017 {
7018 	int err = 0;
7019 	struct libipw_qos_data *qos_data = NULL;
7020 	struct libipw_qos_data ibss_data = {
7021 		.supported = 1,
7022 		.active = 1,
7023 	};
7024 
7025 	switch (priv->ieee->iw_mode) {
7026 	case IW_MODE_ADHOC:
7027 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7028 
7029 		qos_data = &ibss_data;
7030 		break;
7031 
7032 	case IW_MODE_INFRA:
7033 		qos_data = &network->qos_data;
7034 		break;
7035 
7036 	default:
7037 		BUG();
7038 		break;
7039 	}
7040 
7041 	err = ipw_qos_activate(priv, qos_data);
7042 	if (err) {
7043 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7044 		return err;
7045 	}
7046 
7047 	if (priv->qos_data.qos_enable && qos_data->supported) {
7048 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7049 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7050 		return ipw_qos_set_info_element(priv);
7051 	}
7052 
7053 	return 0;
7054 }
7055 
7056 /*
7057 * handling the beaconing responses. if we get different QoS setting
7058 * off the network from the associated setting, adjust the QoS
7059 * setting
7060 */
7061 static int ipw_qos_association_resp(struct ipw_priv *priv,
7062 				    struct libipw_network *network)
7063 {
7064 	int ret = 0;
7065 	unsigned long flags;
7066 	u32 size = sizeof(struct libipw_qos_parameters);
7067 	int set_qos_param = 0;
7068 
7069 	if ((priv == NULL) || (network == NULL) ||
7070 	    (priv->assoc_network == NULL))
7071 		return ret;
7072 
7073 	if (!(priv->status & STATUS_ASSOCIATED))
7074 		return ret;
7075 
7076 	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7077 		return ret;
7078 
7079 	spin_lock_irqsave(&priv->ieee->lock, flags);
7080 	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7081 		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7082 		       sizeof(struct libipw_qos_data));
7083 		priv->assoc_network->qos_data.active = 1;
7084 		if ((network->qos_data.old_param_count !=
7085 		     network->qos_data.param_count)) {
7086 			set_qos_param = 1;
7087 			network->qos_data.old_param_count =
7088 			    network->qos_data.param_count;
7089 		}
7090 
7091 	} else {
7092 		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7093 			memcpy(&priv->assoc_network->qos_data.parameters,
7094 			       &def_parameters_CCK, size);
7095 		else
7096 			memcpy(&priv->assoc_network->qos_data.parameters,
7097 			       &def_parameters_OFDM, size);
7098 		priv->assoc_network->qos_data.active = 0;
7099 		priv->assoc_network->qos_data.supported = 0;
7100 		set_qos_param = 1;
7101 	}
7102 
7103 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7104 
7105 	if (set_qos_param == 1)
7106 		schedule_work(&priv->qos_activate);
7107 
7108 	return ret;
7109 }
7110 
7111 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7112 {
7113 	u32 ret = 0;
7114 
7115 	if ((priv == NULL))
7116 		return 0;
7117 
7118 	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7119 		ret = priv->qos_data.burst_duration_CCK;
7120 	else
7121 		ret = priv->qos_data.burst_duration_OFDM;
7122 
7123 	return ret;
7124 }
7125 
7126 /*
7127 * Initialize the setting of QoS global
7128 */
7129 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7130 			 int burst_enable, u32 burst_duration_CCK,
7131 			 u32 burst_duration_OFDM)
7132 {
7133 	priv->qos_data.qos_enable = enable;
7134 
7135 	if (priv->qos_data.qos_enable) {
7136 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7137 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7138 		IPW_DEBUG_QOS("QoS is enabled\n");
7139 	} else {
7140 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7141 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7142 		IPW_DEBUG_QOS("QoS is not enabled\n");
7143 	}
7144 
7145 	priv->qos_data.burst_enable = burst_enable;
7146 
7147 	if (burst_enable) {
7148 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7149 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7150 	} else {
7151 		priv->qos_data.burst_duration_CCK = 0;
7152 		priv->qos_data.burst_duration_OFDM = 0;
7153 	}
7154 }
7155 
7156 /*
7157 * map the packet priority to the right TX Queue
7158 */
7159 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7160 {
7161 	if (priority > 7 || !priv->qos_data.qos_enable)
7162 		priority = 0;
7163 
7164 	return from_priority_to_tx_queue[priority] - 1;
7165 }
7166 
7167 static int ipw_is_qos_active(struct net_device *dev,
7168 			     struct sk_buff *skb)
7169 {
7170 	struct ipw_priv *priv = libipw_priv(dev);
7171 	struct libipw_qos_data *qos_data = NULL;
7172 	int active, supported;
7173 	u8 *daddr = skb->data + ETH_ALEN;
7174 	int unicast = !is_multicast_ether_addr(daddr);
7175 
7176 	if (!(priv->status & STATUS_ASSOCIATED))
7177 		return 0;
7178 
7179 	qos_data = &priv->assoc_network->qos_data;
7180 
7181 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7182 		if (unicast == 0)
7183 			qos_data->active = 0;
7184 		else
7185 			qos_data->active = qos_data->supported;
7186 	}
7187 	active = qos_data->active;
7188 	supported = qos_data->supported;
7189 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7190 		      "unicast %d\n",
7191 		      priv->qos_data.qos_enable, active, supported, unicast);
7192 	if (active && priv->qos_data.qos_enable)
7193 		return 1;
7194 
7195 	return 0;
7196 
7197 }
7198 /*
7199 * add QoS parameter to the TX command
7200 */
7201 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7202 					u16 priority,
7203 					struct tfd_data *tfd)
7204 {
7205 	int tx_queue_id = 0;
7206 
7207 
7208 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7209 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7210 
7211 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7212 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7213 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7214 	}
7215 	return 0;
7216 }
7217 
7218 /*
7219 * background support to run QoS activate functionality
7220 */
7221 static void ipw_bg_qos_activate(struct work_struct *work)
7222 {
7223 	struct ipw_priv *priv =
7224 		container_of(work, struct ipw_priv, qos_activate);
7225 
7226 	mutex_lock(&priv->mutex);
7227 
7228 	if (priv->status & STATUS_ASSOCIATED)
7229 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7230 
7231 	mutex_unlock(&priv->mutex);
7232 }
7233 
7234 static int ipw_handle_probe_response(struct net_device *dev,
7235 				     struct libipw_probe_response *resp,
7236 				     struct libipw_network *network)
7237 {
7238 	struct ipw_priv *priv = libipw_priv(dev);
7239 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7240 			      (network == priv->assoc_network));
7241 
7242 	ipw_qos_handle_probe_response(priv, active_network, network);
7243 
7244 	return 0;
7245 }
7246 
7247 static int ipw_handle_beacon(struct net_device *dev,
7248 			     struct libipw_beacon *resp,
7249 			     struct libipw_network *network)
7250 {
7251 	struct ipw_priv *priv = libipw_priv(dev);
7252 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7253 			      (network == priv->assoc_network));
7254 
7255 	ipw_qos_handle_probe_response(priv, active_network, network);
7256 
7257 	return 0;
7258 }
7259 
7260 static int ipw_handle_assoc_response(struct net_device *dev,
7261 				     struct libipw_assoc_response *resp,
7262 				     struct libipw_network *network)
7263 {
7264 	struct ipw_priv *priv = libipw_priv(dev);
7265 	ipw_qos_association_resp(priv, network);
7266 	return 0;
7267 }
7268 
7269 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7270 				       *qos_param)
7271 {
7272 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7273 				sizeof(*qos_param) * 3, qos_param);
7274 }
7275 
7276 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7277 				     *qos_param)
7278 {
7279 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7280 				qos_param);
7281 }
7282 
7283 #endif				/* CONFIG_IPW2200_QOS */
7284 
7285 static int ipw_associate_network(struct ipw_priv *priv,
7286 				 struct libipw_network *network,
7287 				 struct ipw_supported_rates *rates, int roaming)
7288 {
7289 	int err;
7290 
7291 	if (priv->config & CFG_FIXED_RATE)
7292 		ipw_set_fixed_rate(priv, network->mode);
7293 
7294 	if (!(priv->config & CFG_STATIC_ESSID)) {
7295 		priv->essid_len = min(network->ssid_len,
7296 				      (u8) IW_ESSID_MAX_SIZE);
7297 		memcpy(priv->essid, network->ssid, priv->essid_len);
7298 	}
7299 
7300 	network->last_associate = jiffies;
7301 
7302 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7303 	priv->assoc_request.channel = network->channel;
7304 	priv->assoc_request.auth_key = 0;
7305 
7306 	if ((priv->capability & CAP_PRIVACY_ON) &&
7307 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7308 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7309 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7310 
7311 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7312 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7313 
7314 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7315 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7316 		priv->assoc_request.auth_type = AUTH_LEAP;
7317 	else
7318 		priv->assoc_request.auth_type = AUTH_OPEN;
7319 
7320 	if (priv->ieee->wpa_ie_len) {
7321 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7322 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7323 				 priv->ieee->wpa_ie_len);
7324 	}
7325 
7326 	/*
7327 	 * It is valid for our ieee device to support multiple modes, but
7328 	 * when it comes to associating to a given network we have to choose
7329 	 * just one mode.
7330 	 */
7331 	if (network->mode & priv->ieee->mode & IEEE_A)
7332 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7333 	else if (network->mode & priv->ieee->mode & IEEE_G)
7334 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7335 	else if (network->mode & priv->ieee->mode & IEEE_B)
7336 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7337 
7338 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7339 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7340 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7341 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7342 	} else {
7343 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7344 
7345 		/* Clear the short preamble if we won't be supporting it */
7346 		priv->assoc_request.capability &=
7347 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7348 	}
7349 
7350 	/* Clear capability bits that aren't used in Ad Hoc */
7351 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7352 		priv->assoc_request.capability &=
7353 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7354 
7355 	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7356 			roaming ? "Rea" : "A",
7357 			priv->essid_len, priv->essid,
7358 			network->channel,
7359 			ipw_modes[priv->assoc_request.ieee_mode],
7360 			rates->num_rates,
7361 			(priv->assoc_request.preamble_length ==
7362 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7363 			network->capability &
7364 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7365 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7366 			priv->capability & CAP_PRIVACY_ON ?
7367 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7368 			 "(open)") : "",
7369 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7370 			priv->capability & CAP_PRIVACY_ON ?
7371 			'1' + priv->ieee->sec.active_key : '.',
7372 			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7373 
7374 	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7375 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7376 	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7377 		priv->assoc_request.assoc_type = HC_IBSS_START;
7378 		priv->assoc_request.assoc_tsf_msw = 0;
7379 		priv->assoc_request.assoc_tsf_lsw = 0;
7380 	} else {
7381 		if (unlikely(roaming))
7382 			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7383 		else
7384 			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7385 		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7386 		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7387 	}
7388 
7389 	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7390 
7391 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7392 		eth_broadcast_addr(priv->assoc_request.dest);
7393 		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7394 	} else {
7395 		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7396 		priv->assoc_request.atim_window = 0;
7397 	}
7398 
7399 	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7400 
7401 	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7402 	if (err) {
7403 		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7404 		return err;
7405 	}
7406 
7407 	rates->ieee_mode = priv->assoc_request.ieee_mode;
7408 	rates->purpose = IPW_RATE_CONNECT;
7409 	ipw_send_supported_rates(priv, rates);
7410 
7411 	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7412 		priv->sys_config.dot11g_auto_detection = 1;
7413 	else
7414 		priv->sys_config.dot11g_auto_detection = 0;
7415 
7416 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7417 		priv->sys_config.answer_broadcast_ssid_probe = 1;
7418 	else
7419 		priv->sys_config.answer_broadcast_ssid_probe = 0;
7420 
7421 	err = ipw_send_system_config(priv);
7422 	if (err) {
7423 		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7424 		return err;
7425 	}
7426 
7427 	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7428 	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7429 	if (err) {
7430 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7431 		return err;
7432 	}
7433 
7434 	/*
7435 	 * If preemption is enabled, it is possible for the association
7436 	 * to complete before we return from ipw_send_associate.  Therefore
7437 	 * we have to be sure and update our priviate data first.
7438 	 */
7439 	priv->channel = network->channel;
7440 	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7441 	priv->status |= STATUS_ASSOCIATING;
7442 	priv->status &= ~STATUS_SECURITY_UPDATED;
7443 
7444 	priv->assoc_network = network;
7445 
7446 #ifdef CONFIG_IPW2200_QOS
7447 	ipw_qos_association(priv, network);
7448 #endif
7449 
7450 	err = ipw_send_associate(priv, &priv->assoc_request);
7451 	if (err) {
7452 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7453 		return err;
7454 	}
7455 
7456 	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7457 		  priv->essid_len, priv->essid, priv->bssid);
7458 
7459 	return 0;
7460 }
7461 
7462 static void ipw_roam(void *data)
7463 {
7464 	struct ipw_priv *priv = data;
7465 	struct libipw_network *network = NULL;
7466 	struct ipw_network_match match = {
7467 		.network = priv->assoc_network
7468 	};
7469 
7470 	/* The roaming process is as follows:
7471 	 *
7472 	 * 1.  Missed beacon threshold triggers the roaming process by
7473 	 *     setting the status ROAM bit and requesting a scan.
7474 	 * 2.  When the scan completes, it schedules the ROAM work
7475 	 * 3.  The ROAM work looks at all of the known networks for one that
7476 	 *     is a better network than the currently associated.  If none
7477 	 *     found, the ROAM process is over (ROAM bit cleared)
7478 	 * 4.  If a better network is found, a disassociation request is
7479 	 *     sent.
7480 	 * 5.  When the disassociation completes, the roam work is again
7481 	 *     scheduled.  The second time through, the driver is no longer
7482 	 *     associated, and the newly selected network is sent an
7483 	 *     association request.
7484 	 * 6.  At this point ,the roaming process is complete and the ROAM
7485 	 *     status bit is cleared.
7486 	 */
7487 
7488 	/* If we are no longer associated, and the roaming bit is no longer
7489 	 * set, then we are not actively roaming, so just return */
7490 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7491 		return;
7492 
7493 	if (priv->status & STATUS_ASSOCIATED) {
7494 		/* First pass through ROAM process -- look for a better
7495 		 * network */
7496 		unsigned long flags;
7497 		u8 rssi = priv->assoc_network->stats.rssi;
7498 		priv->assoc_network->stats.rssi = -128;
7499 		spin_lock_irqsave(&priv->ieee->lock, flags);
7500 		list_for_each_entry(network, &priv->ieee->network_list, list) {
7501 			if (network != priv->assoc_network)
7502 				ipw_best_network(priv, &match, network, 1);
7503 		}
7504 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7505 		priv->assoc_network->stats.rssi = rssi;
7506 
7507 		if (match.network == priv->assoc_network) {
7508 			IPW_DEBUG_ASSOC("No better APs in this network to "
7509 					"roam to.\n");
7510 			priv->status &= ~STATUS_ROAMING;
7511 			ipw_debug_config(priv);
7512 			return;
7513 		}
7514 
7515 		ipw_send_disassociate(priv, 1);
7516 		priv->assoc_network = match.network;
7517 
7518 		return;
7519 	}
7520 
7521 	/* Second pass through ROAM process -- request association */
7522 	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7523 	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7524 	priv->status &= ~STATUS_ROAMING;
7525 }
7526 
7527 static void ipw_bg_roam(struct work_struct *work)
7528 {
7529 	struct ipw_priv *priv =
7530 		container_of(work, struct ipw_priv, roam);
7531 	mutex_lock(&priv->mutex);
7532 	ipw_roam(priv);
7533 	mutex_unlock(&priv->mutex);
7534 }
7535 
7536 static int ipw_associate(void *data)
7537 {
7538 	struct ipw_priv *priv = data;
7539 
7540 	struct libipw_network *network = NULL;
7541 	struct ipw_network_match match = {
7542 		.network = NULL
7543 	};
7544 	struct ipw_supported_rates *rates;
7545 	struct list_head *element;
7546 	unsigned long flags;
7547 
7548 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7549 		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7550 		return 0;
7551 	}
7552 
7553 	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7554 		IPW_DEBUG_ASSOC("Not attempting association (already in "
7555 				"progress)\n");
7556 		return 0;
7557 	}
7558 
7559 	if (priv->status & STATUS_DISASSOCIATING) {
7560 		IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
7561 		schedule_work(&priv->associate);
7562 		return 0;
7563 	}
7564 
7565 	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7566 		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7567 				"initialized)\n");
7568 		return 0;
7569 	}
7570 
7571 	if (!(priv->config & CFG_ASSOCIATE) &&
7572 	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7573 		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7574 		return 0;
7575 	}
7576 
7577 	/* Protect our use of the network_list */
7578 	spin_lock_irqsave(&priv->ieee->lock, flags);
7579 	list_for_each_entry(network, &priv->ieee->network_list, list)
7580 	    ipw_best_network(priv, &match, network, 0);
7581 
7582 	network = match.network;
7583 	rates = &match.rates;
7584 
7585 	if (network == NULL &&
7586 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7587 	    priv->config & CFG_ADHOC_CREATE &&
7588 	    priv->config & CFG_STATIC_ESSID &&
7589 	    priv->config & CFG_STATIC_CHANNEL) {
7590 		/* Use oldest network if the free list is empty */
7591 		if (list_empty(&priv->ieee->network_free_list)) {
7592 			struct libipw_network *oldest = NULL;
7593 			struct libipw_network *target;
7594 
7595 			list_for_each_entry(target, &priv->ieee->network_list, list) {
7596 				if ((oldest == NULL) ||
7597 				    (target->last_scanned < oldest->last_scanned))
7598 					oldest = target;
7599 			}
7600 
7601 			/* If there are no more slots, expire the oldest */
7602 			list_del(&oldest->list);
7603 			target = oldest;
7604 			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7605 					target->ssid_len, target->ssid,
7606 					target->bssid);
7607 			list_add_tail(&target->list,
7608 				      &priv->ieee->network_free_list);
7609 		}
7610 
7611 		element = priv->ieee->network_free_list.next;
7612 		network = list_entry(element, struct libipw_network, list);
7613 		ipw_adhoc_create(priv, network);
7614 		rates = &priv->rates;
7615 		list_del(element);
7616 		list_add_tail(&network->list, &priv->ieee->network_list);
7617 	}
7618 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7619 
7620 	/* If we reached the end of the list, then we don't have any valid
7621 	 * matching APs */
7622 	if (!network) {
7623 		ipw_debug_config(priv);
7624 
7625 		if (!(priv->status & STATUS_SCANNING)) {
7626 			if (!(priv->config & CFG_SPEED_SCAN))
7627 				schedule_delayed_work(&priv->request_scan,
7628 						      SCAN_INTERVAL);
7629 			else
7630 				schedule_delayed_work(&priv->request_scan, 0);
7631 		}
7632 
7633 		return 0;
7634 	}
7635 
7636 	ipw_associate_network(priv, network, rates, 0);
7637 
7638 	return 1;
7639 }
7640 
7641 static void ipw_bg_associate(struct work_struct *work)
7642 {
7643 	struct ipw_priv *priv =
7644 		container_of(work, struct ipw_priv, associate);
7645 	mutex_lock(&priv->mutex);
7646 	ipw_associate(priv);
7647 	mutex_unlock(&priv->mutex);
7648 }
7649 
7650 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7651 				      struct sk_buff *skb)
7652 {
7653 	struct ieee80211_hdr *hdr;
7654 	u16 fc;
7655 
7656 	hdr = (struct ieee80211_hdr *)skb->data;
7657 	fc = le16_to_cpu(hdr->frame_control);
7658 	if (!(fc & IEEE80211_FCTL_PROTECTED))
7659 		return;
7660 
7661 	fc &= ~IEEE80211_FCTL_PROTECTED;
7662 	hdr->frame_control = cpu_to_le16(fc);
7663 	switch (priv->ieee->sec.level) {
7664 	case SEC_LEVEL_3:
7665 		/* Remove CCMP HDR */
7666 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7667 			skb->data + LIBIPW_3ADDR_LEN + 8,
7668 			skb->len - LIBIPW_3ADDR_LEN - 8);
7669 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7670 		break;
7671 	case SEC_LEVEL_2:
7672 		break;
7673 	case SEC_LEVEL_1:
7674 		/* Remove IV */
7675 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7676 			skb->data + LIBIPW_3ADDR_LEN + 4,
7677 			skb->len - LIBIPW_3ADDR_LEN - 4);
7678 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7679 		break;
7680 	case SEC_LEVEL_0:
7681 		break;
7682 	default:
7683 		printk(KERN_ERR "Unknown security level %d\n",
7684 		       priv->ieee->sec.level);
7685 		break;
7686 	}
7687 }
7688 
7689 static void ipw_handle_data_packet(struct ipw_priv *priv,
7690 				   struct ipw_rx_mem_buffer *rxb,
7691 				   struct libipw_rx_stats *stats)
7692 {
7693 	struct net_device *dev = priv->net_dev;
7694 	struct libipw_hdr_4addr *hdr;
7695 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7696 
7697 	/* We received data from the HW, so stop the watchdog */
7698 	netif_trans_update(dev);
7699 
7700 	/* We only process data packets if the
7701 	 * interface is open */
7702 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7703 		     skb_tailroom(rxb->skb))) {
7704 		dev->stats.rx_errors++;
7705 		priv->wstats.discard.misc++;
7706 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7707 		return;
7708 	} else if (unlikely(!netif_running(priv->net_dev))) {
7709 		dev->stats.rx_dropped++;
7710 		priv->wstats.discard.misc++;
7711 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7712 		return;
7713 	}
7714 
7715 	/* Advance skb->data to the start of the actual payload */
7716 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7717 
7718 	/* Set the size of the skb to the size of the frame */
7719 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7720 
7721 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7722 
7723 	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7724 	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7725 	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7726 	    (is_multicast_ether_addr(hdr->addr1) ?
7727 	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7728 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7729 
7730 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7731 		dev->stats.rx_errors++;
7732 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7733 		rxb->skb = NULL;
7734 		__ipw_led_activity_on(priv);
7735 	}
7736 }
7737 
7738 #ifdef CONFIG_IPW2200_RADIOTAP
7739 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7740 					   struct ipw_rx_mem_buffer *rxb,
7741 					   struct libipw_rx_stats *stats)
7742 {
7743 	struct net_device *dev = priv->net_dev;
7744 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7745 	struct ipw_rx_frame *frame = &pkt->u.frame;
7746 
7747 	/* initial pull of some data */
7748 	u16 received_channel = frame->received_channel;
7749 	u8 antennaAndPhy = frame->antennaAndPhy;
7750 	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7751 	u16 pktrate = frame->rate;
7752 
7753 	/* Magic struct that slots into the radiotap header -- no reason
7754 	 * to build this manually element by element, we can write it much
7755 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7756 	struct ipw_rt_hdr *ipw_rt;
7757 
7758 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7759 
7760 	/* We received data from the HW, so stop the watchdog */
7761 	netif_trans_update(dev);
7762 
7763 	/* We only process data packets if the
7764 	 * interface is open */
7765 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7766 		     skb_tailroom(rxb->skb))) {
7767 		dev->stats.rx_errors++;
7768 		priv->wstats.discard.misc++;
7769 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7770 		return;
7771 	} else if (unlikely(!netif_running(priv->net_dev))) {
7772 		dev->stats.rx_dropped++;
7773 		priv->wstats.discard.misc++;
7774 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7775 		return;
7776 	}
7777 
7778 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7779 	 * that now */
7780 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7781 		/* FIXME: Should alloc bigger skb instead */
7782 		dev->stats.rx_dropped++;
7783 		priv->wstats.discard.misc++;
7784 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7785 		return;
7786 	}
7787 
7788 	/* copy the frame itself */
7789 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7790 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7791 
7792 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7793 
7794 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7795 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7796 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7797 
7798 	/* Big bitfield of all the fields we provide in radiotap */
7799 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7800 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7801 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7802 	     (1 << IEEE80211_RADIOTAP_RATE) |
7803 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7804 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7805 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7806 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7807 
7808 	/* Zero the flags, we'll add to them as we go */
7809 	ipw_rt->rt_flags = 0;
7810 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7811 			       frame->parent_tsf[2] << 16 |
7812 			       frame->parent_tsf[1] << 8  |
7813 			       frame->parent_tsf[0]);
7814 
7815 	/* Convert signal to DBM */
7816 	ipw_rt->rt_dbmsignal = antsignal;
7817 	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7818 
7819 	/* Convert the channel data and set the flags */
7820 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7821 	if (received_channel > 14) {	/* 802.11a */
7822 		ipw_rt->rt_chbitmask =
7823 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7824 	} else if (antennaAndPhy & 32) {	/* 802.11b */
7825 		ipw_rt->rt_chbitmask =
7826 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7827 	} else {		/* 802.11g */
7828 		ipw_rt->rt_chbitmask =
7829 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7830 	}
7831 
7832 	/* set the rate in multiples of 500k/s */
7833 	switch (pktrate) {
7834 	case IPW_TX_RATE_1MB:
7835 		ipw_rt->rt_rate = 2;
7836 		break;
7837 	case IPW_TX_RATE_2MB:
7838 		ipw_rt->rt_rate = 4;
7839 		break;
7840 	case IPW_TX_RATE_5MB:
7841 		ipw_rt->rt_rate = 10;
7842 		break;
7843 	case IPW_TX_RATE_6MB:
7844 		ipw_rt->rt_rate = 12;
7845 		break;
7846 	case IPW_TX_RATE_9MB:
7847 		ipw_rt->rt_rate = 18;
7848 		break;
7849 	case IPW_TX_RATE_11MB:
7850 		ipw_rt->rt_rate = 22;
7851 		break;
7852 	case IPW_TX_RATE_12MB:
7853 		ipw_rt->rt_rate = 24;
7854 		break;
7855 	case IPW_TX_RATE_18MB:
7856 		ipw_rt->rt_rate = 36;
7857 		break;
7858 	case IPW_TX_RATE_24MB:
7859 		ipw_rt->rt_rate = 48;
7860 		break;
7861 	case IPW_TX_RATE_36MB:
7862 		ipw_rt->rt_rate = 72;
7863 		break;
7864 	case IPW_TX_RATE_48MB:
7865 		ipw_rt->rt_rate = 96;
7866 		break;
7867 	case IPW_TX_RATE_54MB:
7868 		ipw_rt->rt_rate = 108;
7869 		break;
7870 	default:
7871 		ipw_rt->rt_rate = 0;
7872 		break;
7873 	}
7874 
7875 	/* antenna number */
7876 	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7877 
7878 	/* set the preamble flag if we have it */
7879 	if ((antennaAndPhy & 64))
7880 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7881 
7882 	/* Set the size of the skb to the size of the frame */
7883 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7884 
7885 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7886 
7887 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7888 		dev->stats.rx_errors++;
7889 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7890 		rxb->skb = NULL;
7891 		/* no LED during capture */
7892 	}
7893 }
7894 #endif
7895 
7896 #ifdef CONFIG_IPW2200_PROMISCUOUS
7897 #define libipw_is_probe_response(fc) \
7898    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7899     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7900 
7901 #define libipw_is_management(fc) \
7902    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7903 
7904 #define libipw_is_control(fc) \
7905    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7906 
7907 #define libipw_is_data(fc) \
7908    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7909 
7910 #define libipw_is_assoc_request(fc) \
7911    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7912 
7913 #define libipw_is_reassoc_request(fc) \
7914    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7915 
7916 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7917 				      struct ipw_rx_mem_buffer *rxb,
7918 				      struct libipw_rx_stats *stats)
7919 {
7920 	struct net_device *dev = priv->prom_net_dev;
7921 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7922 	struct ipw_rx_frame *frame = &pkt->u.frame;
7923 	struct ipw_rt_hdr *ipw_rt;
7924 
7925 	/* First cache any information we need before we overwrite
7926 	 * the information provided in the skb from the hardware */
7927 	struct ieee80211_hdr *hdr;
7928 	u16 channel = frame->received_channel;
7929 	u8 phy_flags = frame->antennaAndPhy;
7930 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7931 	s8 noise = (s8) le16_to_cpu(frame->noise);
7932 	u8 rate = frame->rate;
7933 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7934 	struct sk_buff *skb;
7935 	int hdr_only = 0;
7936 	u16 filter = priv->prom_priv->filter;
7937 
7938 	/* If the filter is set to not include Rx frames then return */
7939 	if (filter & IPW_PROM_NO_RX)
7940 		return;
7941 
7942 	/* We received data from the HW, so stop the watchdog */
7943 	netif_trans_update(dev);
7944 
7945 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7946 		dev->stats.rx_errors++;
7947 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7948 		return;
7949 	}
7950 
7951 	/* We only process data packets if the interface is open */
7952 	if (unlikely(!netif_running(dev))) {
7953 		dev->stats.rx_dropped++;
7954 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7955 		return;
7956 	}
7957 
7958 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7959 	 * that now */
7960 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7961 		/* FIXME: Should alloc bigger skb instead */
7962 		dev->stats.rx_dropped++;
7963 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7964 		return;
7965 	}
7966 
7967 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7968 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7969 		if (filter & IPW_PROM_NO_MGMT)
7970 			return;
7971 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7972 			hdr_only = 1;
7973 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7974 		if (filter & IPW_PROM_NO_CTL)
7975 			return;
7976 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7977 			hdr_only = 1;
7978 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7979 		if (filter & IPW_PROM_NO_DATA)
7980 			return;
7981 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7982 			hdr_only = 1;
7983 	}
7984 
7985 	/* Copy the SKB since this is for the promiscuous side */
7986 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7987 	if (skb == NULL) {
7988 		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7989 		return;
7990 	}
7991 
7992 	/* copy the frame data to write after where the radiotap header goes */
7993 	ipw_rt = (void *)skb->data;
7994 
7995 	if (hdr_only)
7996 		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
7997 
7998 	memcpy(ipw_rt->payload, hdr, len);
7999 
8000 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8001 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8002 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8003 
8004 	/* Set the size of the skb to the size of the frame */
8005 	skb_put(skb, sizeof(*ipw_rt) + len);
8006 
8007 	/* Big bitfield of all the fields we provide in radiotap */
8008 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8009 	     (1 << IEEE80211_RADIOTAP_TSFT) |
8010 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8011 	     (1 << IEEE80211_RADIOTAP_RATE) |
8012 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8013 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8014 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8015 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8016 
8017 	/* Zero the flags, we'll add to them as we go */
8018 	ipw_rt->rt_flags = 0;
8019 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8020 			       frame->parent_tsf[2] << 16 |
8021 			       frame->parent_tsf[1] << 8  |
8022 			       frame->parent_tsf[0]);
8023 
8024 	/* Convert to DBM */
8025 	ipw_rt->rt_dbmsignal = signal;
8026 	ipw_rt->rt_dbmnoise = noise;
8027 
8028 	/* Convert the channel data and set the flags */
8029 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8030 	if (channel > 14) {	/* 802.11a */
8031 		ipw_rt->rt_chbitmask =
8032 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8033 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8034 		ipw_rt->rt_chbitmask =
8035 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8036 	} else {		/* 802.11g */
8037 		ipw_rt->rt_chbitmask =
8038 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8039 	}
8040 
8041 	/* set the rate in multiples of 500k/s */
8042 	switch (rate) {
8043 	case IPW_TX_RATE_1MB:
8044 		ipw_rt->rt_rate = 2;
8045 		break;
8046 	case IPW_TX_RATE_2MB:
8047 		ipw_rt->rt_rate = 4;
8048 		break;
8049 	case IPW_TX_RATE_5MB:
8050 		ipw_rt->rt_rate = 10;
8051 		break;
8052 	case IPW_TX_RATE_6MB:
8053 		ipw_rt->rt_rate = 12;
8054 		break;
8055 	case IPW_TX_RATE_9MB:
8056 		ipw_rt->rt_rate = 18;
8057 		break;
8058 	case IPW_TX_RATE_11MB:
8059 		ipw_rt->rt_rate = 22;
8060 		break;
8061 	case IPW_TX_RATE_12MB:
8062 		ipw_rt->rt_rate = 24;
8063 		break;
8064 	case IPW_TX_RATE_18MB:
8065 		ipw_rt->rt_rate = 36;
8066 		break;
8067 	case IPW_TX_RATE_24MB:
8068 		ipw_rt->rt_rate = 48;
8069 		break;
8070 	case IPW_TX_RATE_36MB:
8071 		ipw_rt->rt_rate = 72;
8072 		break;
8073 	case IPW_TX_RATE_48MB:
8074 		ipw_rt->rt_rate = 96;
8075 		break;
8076 	case IPW_TX_RATE_54MB:
8077 		ipw_rt->rt_rate = 108;
8078 		break;
8079 	default:
8080 		ipw_rt->rt_rate = 0;
8081 		break;
8082 	}
8083 
8084 	/* antenna number */
8085 	ipw_rt->rt_antenna = (phy_flags & 3);
8086 
8087 	/* set the preamble flag if we have it */
8088 	if (phy_flags & (1 << 6))
8089 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8090 
8091 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8092 
8093 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8094 		dev->stats.rx_errors++;
8095 		dev_kfree_skb_any(skb);
8096 	}
8097 }
8098 #endif
8099 
8100 static int is_network_packet(struct ipw_priv *priv,
8101 				    struct libipw_hdr_4addr *header)
8102 {
8103 	/* Filter incoming packets to determine if they are targeted toward
8104 	 * this network, discarding packets coming from ourselves */
8105 	switch (priv->ieee->iw_mode) {
8106 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8107 		/* packets from our adapter are dropped (echo) */
8108 		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8109 			return 0;
8110 
8111 		/* {broad,multi}cast packets to our BSSID go through */
8112 		if (is_multicast_ether_addr(header->addr1))
8113 			return ether_addr_equal(header->addr3, priv->bssid);
8114 
8115 		/* packets to our adapter go through */
8116 		return ether_addr_equal(header->addr1,
8117 					priv->net_dev->dev_addr);
8118 
8119 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8120 		/* packets from our adapter are dropped (echo) */
8121 		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8122 			return 0;
8123 
8124 		/* {broad,multi}cast packets to our BSS go through */
8125 		if (is_multicast_ether_addr(header->addr1))
8126 			return ether_addr_equal(header->addr2, priv->bssid);
8127 
8128 		/* packets to our adapter go through */
8129 		return ether_addr_equal(header->addr1,
8130 					priv->net_dev->dev_addr);
8131 	}
8132 
8133 	return 1;
8134 }
8135 
8136 #define IPW_PACKET_RETRY_TIME HZ
8137 
8138 static  int is_duplicate_packet(struct ipw_priv *priv,
8139 				      struct libipw_hdr_4addr *header)
8140 {
8141 	u16 sc = le16_to_cpu(header->seq_ctl);
8142 	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8143 	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8144 	u16 *last_seq, *last_frag;
8145 	unsigned long *last_time;
8146 
8147 	switch (priv->ieee->iw_mode) {
8148 	case IW_MODE_ADHOC:
8149 		{
8150 			struct list_head *p;
8151 			struct ipw_ibss_seq *entry = NULL;
8152 			u8 *mac = header->addr2;
8153 			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8154 
8155 			list_for_each(p, &priv->ibss_mac_hash[index]) {
8156 				entry =
8157 				    list_entry(p, struct ipw_ibss_seq, list);
8158 				if (ether_addr_equal(entry->mac, mac))
8159 					break;
8160 			}
8161 			if (p == &priv->ibss_mac_hash[index]) {
8162 				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8163 				if (!entry) {
8164 					IPW_ERROR
8165 					    ("Cannot malloc new mac entry\n");
8166 					return 0;
8167 				}
8168 				memcpy(entry->mac, mac, ETH_ALEN);
8169 				entry->seq_num = seq;
8170 				entry->frag_num = frag;
8171 				entry->packet_time = jiffies;
8172 				list_add(&entry->list,
8173 					 &priv->ibss_mac_hash[index]);
8174 				return 0;
8175 			}
8176 			last_seq = &entry->seq_num;
8177 			last_frag = &entry->frag_num;
8178 			last_time = &entry->packet_time;
8179 			break;
8180 		}
8181 	case IW_MODE_INFRA:
8182 		last_seq = &priv->last_seq_num;
8183 		last_frag = &priv->last_frag_num;
8184 		last_time = &priv->last_packet_time;
8185 		break;
8186 	default:
8187 		return 0;
8188 	}
8189 	if ((*last_seq == seq) &&
8190 	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8191 		if (*last_frag == frag)
8192 			goto drop;
8193 		if (*last_frag + 1 != frag)
8194 			/* out-of-order fragment */
8195 			goto drop;
8196 	} else
8197 		*last_seq = seq;
8198 
8199 	*last_frag = frag;
8200 	*last_time = jiffies;
8201 	return 0;
8202 
8203       drop:
8204 	/* Comment this line now since we observed the card receives
8205 	 * duplicate packets but the FCTL_RETRY bit is not set in the
8206 	 * IBSS mode with fragmentation enabled.
8207 	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8208 	return 1;
8209 }
8210 
8211 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8212 				   struct ipw_rx_mem_buffer *rxb,
8213 				   struct libipw_rx_stats *stats)
8214 {
8215 	struct sk_buff *skb = rxb->skb;
8216 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8217 	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8218 	    (skb->data + IPW_RX_FRAME_SIZE);
8219 
8220 	libipw_rx_mgt(priv->ieee, header, stats);
8221 
8222 	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8223 	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8224 	      IEEE80211_STYPE_PROBE_RESP) ||
8225 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8226 	      IEEE80211_STYPE_BEACON))) {
8227 		if (ether_addr_equal(header->addr3, priv->bssid))
8228 			ipw_add_station(priv, header->addr2);
8229 	}
8230 
8231 	if (priv->config & CFG_NET_STATS) {
8232 		IPW_DEBUG_HC("sending stat packet\n");
8233 
8234 		/* Set the size of the skb to the size of the full
8235 		 * ipw header and 802.11 frame */
8236 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8237 			IPW_RX_FRAME_SIZE);
8238 
8239 		/* Advance past the ipw packet header to the 802.11 frame */
8240 		skb_pull(skb, IPW_RX_FRAME_SIZE);
8241 
8242 		/* Push the libipw_rx_stats before the 802.11 frame */
8243 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8244 
8245 		skb->dev = priv->ieee->dev;
8246 
8247 		/* Point raw at the libipw_stats */
8248 		skb_reset_mac_header(skb);
8249 
8250 		skb->pkt_type = PACKET_OTHERHOST;
8251 		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8252 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8253 		netif_rx(skb);
8254 		rxb->skb = NULL;
8255 	}
8256 }
8257 
8258 /*
8259  * Main entry function for receiving a packet with 80211 headers.  This
8260  * should be called when ever the FW has notified us that there is a new
8261  * skb in the receive queue.
8262  */
8263 static void ipw_rx(struct ipw_priv *priv)
8264 {
8265 	struct ipw_rx_mem_buffer *rxb;
8266 	struct ipw_rx_packet *pkt;
8267 	struct libipw_hdr_4addr *header;
8268 	u32 r, w, i;
8269 	u8 network_packet;
8270 	u8 fill_rx = 0;
8271 
8272 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8273 	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8274 	i = priv->rxq->read;
8275 
8276 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8277 		fill_rx = 1;
8278 
8279 	while (i != r) {
8280 		rxb = priv->rxq->queue[i];
8281 		if (unlikely(rxb == NULL)) {
8282 			printk(KERN_CRIT "Queue not allocated!\n");
8283 			break;
8284 		}
8285 		priv->rxq->queue[i] = NULL;
8286 
8287 		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8288 					    IPW_RX_BUF_SIZE,
8289 					    PCI_DMA_FROMDEVICE);
8290 
8291 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8292 		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8293 			     pkt->header.message_type,
8294 			     pkt->header.rx_seq_num, pkt->header.control_bits);
8295 
8296 		switch (pkt->header.message_type) {
8297 		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8298 				struct libipw_rx_stats stats = {
8299 					.rssi = pkt->u.frame.rssi_dbm -
8300 					    IPW_RSSI_TO_DBM,
8301 					.signal =
8302 					    pkt->u.frame.rssi_dbm -
8303 					    IPW_RSSI_TO_DBM + 0x100,
8304 					.noise =
8305 					    le16_to_cpu(pkt->u.frame.noise),
8306 					.rate = pkt->u.frame.rate,
8307 					.mac_time = jiffies,
8308 					.received_channel =
8309 					    pkt->u.frame.received_channel,
8310 					.freq =
8311 					    (pkt->u.frame.
8312 					     control & (1 << 0)) ?
8313 					    LIBIPW_24GHZ_BAND :
8314 					    LIBIPW_52GHZ_BAND,
8315 					.len = le16_to_cpu(pkt->u.frame.length),
8316 				};
8317 
8318 				if (stats.rssi != 0)
8319 					stats.mask |= LIBIPW_STATMASK_RSSI;
8320 				if (stats.signal != 0)
8321 					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8322 				if (stats.noise != 0)
8323 					stats.mask |= LIBIPW_STATMASK_NOISE;
8324 				if (stats.rate != 0)
8325 					stats.mask |= LIBIPW_STATMASK_RATE;
8326 
8327 				priv->rx_packets++;
8328 
8329 #ifdef CONFIG_IPW2200_PROMISCUOUS
8330 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8331 		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8332 #endif
8333 
8334 #ifdef CONFIG_IPW2200_MONITOR
8335 				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8336 #ifdef CONFIG_IPW2200_RADIOTAP
8337 
8338                 ipw_handle_data_packet_monitor(priv,
8339 					       rxb,
8340 					       &stats);
8341 #else
8342 		ipw_handle_data_packet(priv, rxb,
8343 				       &stats);
8344 #endif
8345 					break;
8346 				}
8347 #endif
8348 
8349 				header =
8350 				    (struct libipw_hdr_4addr *)(rxb->skb->
8351 								   data +
8352 								   IPW_RX_FRAME_SIZE);
8353 				/* TODO: Check Ad-Hoc dest/source and make sure
8354 				 * that we are actually parsing these packets
8355 				 * correctly -- we should probably use the
8356 				 * frame control of the packet and disregard
8357 				 * the current iw_mode */
8358 
8359 				network_packet =
8360 				    is_network_packet(priv, header);
8361 				if (network_packet && priv->assoc_network) {
8362 					priv->assoc_network->stats.rssi =
8363 					    stats.rssi;
8364 					priv->exp_avg_rssi =
8365 					    exponential_average(priv->exp_avg_rssi,
8366 					    stats.rssi, DEPTH_RSSI);
8367 				}
8368 
8369 				IPW_DEBUG_RX("Frame: len=%u\n",
8370 					     le16_to_cpu(pkt->u.frame.length));
8371 
8372 				if (le16_to_cpu(pkt->u.frame.length) <
8373 				    libipw_get_hdrlen(le16_to_cpu(
8374 						    header->frame_ctl))) {
8375 					IPW_DEBUG_DROP
8376 					    ("Received packet is too small. "
8377 					     "Dropping.\n");
8378 					priv->net_dev->stats.rx_errors++;
8379 					priv->wstats.discard.misc++;
8380 					break;
8381 				}
8382 
8383 				switch (WLAN_FC_GET_TYPE
8384 					(le16_to_cpu(header->frame_ctl))) {
8385 
8386 				case IEEE80211_FTYPE_MGMT:
8387 					ipw_handle_mgmt_packet(priv, rxb,
8388 							       &stats);
8389 					break;
8390 
8391 				case IEEE80211_FTYPE_CTL:
8392 					break;
8393 
8394 				case IEEE80211_FTYPE_DATA:
8395 					if (unlikely(!network_packet ||
8396 						     is_duplicate_packet(priv,
8397 									 header)))
8398 					{
8399 						IPW_DEBUG_DROP("Dropping: "
8400 							       "%pM, "
8401 							       "%pM, "
8402 							       "%pM\n",
8403 							       header->addr1,
8404 							       header->addr2,
8405 							       header->addr3);
8406 						break;
8407 					}
8408 
8409 					ipw_handle_data_packet(priv, rxb,
8410 							       &stats);
8411 
8412 					break;
8413 				}
8414 				break;
8415 			}
8416 
8417 		case RX_HOST_NOTIFICATION_TYPE:{
8418 				IPW_DEBUG_RX
8419 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8420 				     pkt->u.notification.subtype,
8421 				     pkt->u.notification.flags,
8422 				     le16_to_cpu(pkt->u.notification.size));
8423 				ipw_rx_notification(priv, &pkt->u.notification);
8424 				break;
8425 			}
8426 
8427 		default:
8428 			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8429 				     pkt->header.message_type);
8430 			break;
8431 		}
8432 
8433 		/* For now we just don't re-use anything.  We can tweak this
8434 		 * later to try and re-use notification packets and SKBs that
8435 		 * fail to Rx correctly */
8436 		if (rxb->skb != NULL) {
8437 			dev_kfree_skb_any(rxb->skb);
8438 			rxb->skb = NULL;
8439 		}
8440 
8441 		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8442 				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8443 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8444 
8445 		i = (i + 1) % RX_QUEUE_SIZE;
8446 
8447 		/* If there are a lot of unsued frames, restock the Rx queue
8448 		 * so the ucode won't assert */
8449 		if (fill_rx) {
8450 			priv->rxq->read = i;
8451 			ipw_rx_queue_replenish(priv);
8452 		}
8453 	}
8454 
8455 	/* Backtrack one entry */
8456 	priv->rxq->read = i;
8457 	ipw_rx_queue_restock(priv);
8458 }
8459 
8460 #define DEFAULT_RTS_THRESHOLD     2304U
8461 #define MIN_RTS_THRESHOLD         1U
8462 #define MAX_RTS_THRESHOLD         2304U
8463 #define DEFAULT_BEACON_INTERVAL   100U
8464 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8465 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8466 
8467 /**
8468  * ipw_sw_reset
8469  * @option: options to control different reset behaviour
8470  * 	    0 = reset everything except the 'disable' module_param
8471  * 	    1 = reset everything and print out driver info (for probe only)
8472  * 	    2 = reset everything
8473  */
8474 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8475 {
8476 	int band, modulation;
8477 	int old_mode = priv->ieee->iw_mode;
8478 
8479 	/* Initialize module parameter values here */
8480 	priv->config = 0;
8481 
8482 	/* We default to disabling the LED code as right now it causes
8483 	 * too many systems to lock up... */
8484 	if (!led_support)
8485 		priv->config |= CFG_NO_LED;
8486 
8487 	if (associate)
8488 		priv->config |= CFG_ASSOCIATE;
8489 	else
8490 		IPW_DEBUG_INFO("Auto associate disabled.\n");
8491 
8492 	if (auto_create)
8493 		priv->config |= CFG_ADHOC_CREATE;
8494 	else
8495 		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8496 
8497 	priv->config &= ~CFG_STATIC_ESSID;
8498 	priv->essid_len = 0;
8499 	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8500 
8501 	if (disable && option) {
8502 		priv->status |= STATUS_RF_KILL_SW;
8503 		IPW_DEBUG_INFO("Radio disabled.\n");
8504 	}
8505 
8506 	if (default_channel != 0) {
8507 		priv->config |= CFG_STATIC_CHANNEL;
8508 		priv->channel = default_channel;
8509 		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8510 		/* TODO: Validate that provided channel is in range */
8511 	}
8512 #ifdef CONFIG_IPW2200_QOS
8513 	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8514 		     burst_duration_CCK, burst_duration_OFDM);
8515 #endif				/* CONFIG_IPW2200_QOS */
8516 
8517 	switch (network_mode) {
8518 	case 1:
8519 		priv->ieee->iw_mode = IW_MODE_ADHOC;
8520 		priv->net_dev->type = ARPHRD_ETHER;
8521 
8522 		break;
8523 #ifdef CONFIG_IPW2200_MONITOR
8524 	case 2:
8525 		priv->ieee->iw_mode = IW_MODE_MONITOR;
8526 #ifdef CONFIG_IPW2200_RADIOTAP
8527 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8528 #else
8529 		priv->net_dev->type = ARPHRD_IEEE80211;
8530 #endif
8531 		break;
8532 #endif
8533 	default:
8534 	case 0:
8535 		priv->net_dev->type = ARPHRD_ETHER;
8536 		priv->ieee->iw_mode = IW_MODE_INFRA;
8537 		break;
8538 	}
8539 
8540 	if (hwcrypto) {
8541 		priv->ieee->host_encrypt = 0;
8542 		priv->ieee->host_encrypt_msdu = 0;
8543 		priv->ieee->host_decrypt = 0;
8544 		priv->ieee->host_mc_decrypt = 0;
8545 	}
8546 	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8547 
8548 	/* IPW2200/2915 is abled to do hardware fragmentation. */
8549 	priv->ieee->host_open_frag = 0;
8550 
8551 	if ((priv->pci_dev->device == 0x4223) ||
8552 	    (priv->pci_dev->device == 0x4224)) {
8553 		if (option == 1)
8554 			printk(KERN_INFO DRV_NAME
8555 			       ": Detected Intel PRO/Wireless 2915ABG Network "
8556 			       "Connection\n");
8557 		priv->ieee->abg_true = 1;
8558 		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8559 		modulation = LIBIPW_OFDM_MODULATION |
8560 		    LIBIPW_CCK_MODULATION;
8561 		priv->adapter = IPW_2915ABG;
8562 		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8563 	} else {
8564 		if (option == 1)
8565 			printk(KERN_INFO DRV_NAME
8566 			       ": Detected Intel PRO/Wireless 2200BG Network "
8567 			       "Connection\n");
8568 
8569 		priv->ieee->abg_true = 0;
8570 		band = LIBIPW_24GHZ_BAND;
8571 		modulation = LIBIPW_OFDM_MODULATION |
8572 		    LIBIPW_CCK_MODULATION;
8573 		priv->adapter = IPW_2200BG;
8574 		priv->ieee->mode = IEEE_G | IEEE_B;
8575 	}
8576 
8577 	priv->ieee->freq_band = band;
8578 	priv->ieee->modulation = modulation;
8579 
8580 	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8581 
8582 	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8583 	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8584 
8585 	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8586 	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8587 	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8588 
8589 	/* If power management is turned on, default to AC mode */
8590 	priv->power_mode = IPW_POWER_AC;
8591 	priv->tx_power = IPW_TX_POWER_DEFAULT;
8592 
8593 	return old_mode == priv->ieee->iw_mode;
8594 }
8595 
8596 /*
8597  * This file defines the Wireless Extension handlers.  It does not
8598  * define any methods of hardware manipulation and relies on the
8599  * functions defined in ipw_main to provide the HW interaction.
8600  *
8601  * The exception to this is the use of the ipw_get_ordinal()
8602  * function used to poll the hardware vs. making unnecessary calls.
8603  *
8604  */
8605 
8606 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8607 {
8608 	if (channel == 0) {
8609 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8610 		priv->config &= ~CFG_STATIC_CHANNEL;
8611 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8612 				"parameters.\n");
8613 		ipw_associate(priv);
8614 		return 0;
8615 	}
8616 
8617 	priv->config |= CFG_STATIC_CHANNEL;
8618 
8619 	if (priv->channel == channel) {
8620 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8621 			       channel);
8622 		return 0;
8623 	}
8624 
8625 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8626 	priv->channel = channel;
8627 
8628 #ifdef CONFIG_IPW2200_MONITOR
8629 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8630 		int i;
8631 		if (priv->status & STATUS_SCANNING) {
8632 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8633 				       "channel change.\n");
8634 			ipw_abort_scan(priv);
8635 		}
8636 
8637 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8638 			udelay(10);
8639 
8640 		if (priv->status & STATUS_SCANNING)
8641 			IPW_DEBUG_SCAN("Still scanning...\n");
8642 		else
8643 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8644 				       1000 - i);
8645 
8646 		return 0;
8647 	}
8648 #endif				/* CONFIG_IPW2200_MONITOR */
8649 
8650 	/* Network configuration changed -- force [re]association */
8651 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8652 	if (!ipw_disassociate(priv))
8653 		ipw_associate(priv);
8654 
8655 	return 0;
8656 }
8657 
8658 static int ipw_wx_set_freq(struct net_device *dev,
8659 			   struct iw_request_info *info,
8660 			   union iwreq_data *wrqu, char *extra)
8661 {
8662 	struct ipw_priv *priv = libipw_priv(dev);
8663 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8664 	struct iw_freq *fwrq = &wrqu->freq;
8665 	int ret = 0, i;
8666 	u8 channel, flags;
8667 	int band;
8668 
8669 	if (fwrq->m == 0) {
8670 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8671 		mutex_lock(&priv->mutex);
8672 		ret = ipw_set_channel(priv, 0);
8673 		mutex_unlock(&priv->mutex);
8674 		return ret;
8675 	}
8676 	/* if setting by freq convert to channel */
8677 	if (fwrq->e == 1) {
8678 		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8679 		if (channel == 0)
8680 			return -EINVAL;
8681 	} else
8682 		channel = fwrq->m;
8683 
8684 	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8685 		return -EINVAL;
8686 
8687 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8688 		i = libipw_channel_to_index(priv->ieee, channel);
8689 		if (i == -1)
8690 			return -EINVAL;
8691 
8692 		flags = (band == LIBIPW_24GHZ_BAND) ?
8693 		    geo->bg[i].flags : geo->a[i].flags;
8694 		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8695 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8696 			return -EINVAL;
8697 		}
8698 	}
8699 
8700 	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8701 	mutex_lock(&priv->mutex);
8702 	ret = ipw_set_channel(priv, channel);
8703 	mutex_unlock(&priv->mutex);
8704 	return ret;
8705 }
8706 
8707 static int ipw_wx_get_freq(struct net_device *dev,
8708 			   struct iw_request_info *info,
8709 			   union iwreq_data *wrqu, char *extra)
8710 {
8711 	struct ipw_priv *priv = libipw_priv(dev);
8712 
8713 	wrqu->freq.e = 0;
8714 
8715 	/* If we are associated, trying to associate, or have a statically
8716 	 * configured CHANNEL then return that; otherwise return ANY */
8717 	mutex_lock(&priv->mutex);
8718 	if (priv->config & CFG_STATIC_CHANNEL ||
8719 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8720 		int i;
8721 
8722 		i = libipw_channel_to_index(priv->ieee, priv->channel);
8723 		BUG_ON(i == -1);
8724 		wrqu->freq.e = 1;
8725 
8726 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8727 		case LIBIPW_52GHZ_BAND:
8728 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8729 			break;
8730 
8731 		case LIBIPW_24GHZ_BAND:
8732 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8733 			break;
8734 
8735 		default:
8736 			BUG();
8737 		}
8738 	} else
8739 		wrqu->freq.m = 0;
8740 
8741 	mutex_unlock(&priv->mutex);
8742 	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8743 	return 0;
8744 }
8745 
8746 static int ipw_wx_set_mode(struct net_device *dev,
8747 			   struct iw_request_info *info,
8748 			   union iwreq_data *wrqu, char *extra)
8749 {
8750 	struct ipw_priv *priv = libipw_priv(dev);
8751 	int err = 0;
8752 
8753 	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8754 
8755 	switch (wrqu->mode) {
8756 #ifdef CONFIG_IPW2200_MONITOR
8757 	case IW_MODE_MONITOR:
8758 #endif
8759 	case IW_MODE_ADHOC:
8760 	case IW_MODE_INFRA:
8761 		break;
8762 	case IW_MODE_AUTO:
8763 		wrqu->mode = IW_MODE_INFRA;
8764 		break;
8765 	default:
8766 		return -EINVAL;
8767 	}
8768 	if (wrqu->mode == priv->ieee->iw_mode)
8769 		return 0;
8770 
8771 	mutex_lock(&priv->mutex);
8772 
8773 	ipw_sw_reset(priv, 0);
8774 
8775 #ifdef CONFIG_IPW2200_MONITOR
8776 	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8777 		priv->net_dev->type = ARPHRD_ETHER;
8778 
8779 	if (wrqu->mode == IW_MODE_MONITOR)
8780 #ifdef CONFIG_IPW2200_RADIOTAP
8781 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8782 #else
8783 		priv->net_dev->type = ARPHRD_IEEE80211;
8784 #endif
8785 #endif				/* CONFIG_IPW2200_MONITOR */
8786 
8787 	/* Free the existing firmware and reset the fw_loaded
8788 	 * flag so ipw_load() will bring in the new firmware */
8789 	free_firmware();
8790 
8791 	priv->ieee->iw_mode = wrqu->mode;
8792 
8793 	schedule_work(&priv->adapter_restart);
8794 	mutex_unlock(&priv->mutex);
8795 	return err;
8796 }
8797 
8798 static int ipw_wx_get_mode(struct net_device *dev,
8799 			   struct iw_request_info *info,
8800 			   union iwreq_data *wrqu, char *extra)
8801 {
8802 	struct ipw_priv *priv = libipw_priv(dev);
8803 	mutex_lock(&priv->mutex);
8804 	wrqu->mode = priv->ieee->iw_mode;
8805 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8806 	mutex_unlock(&priv->mutex);
8807 	return 0;
8808 }
8809 
8810 /* Values are in microsecond */
8811 static const s32 timeout_duration[] = {
8812 	350000,
8813 	250000,
8814 	75000,
8815 	37000,
8816 	25000,
8817 };
8818 
8819 static const s32 period_duration[] = {
8820 	400000,
8821 	700000,
8822 	1000000,
8823 	1000000,
8824 	1000000
8825 };
8826 
8827 static int ipw_wx_get_range(struct net_device *dev,
8828 			    struct iw_request_info *info,
8829 			    union iwreq_data *wrqu, char *extra)
8830 {
8831 	struct ipw_priv *priv = libipw_priv(dev);
8832 	struct iw_range *range = (struct iw_range *)extra;
8833 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8834 	int i = 0, j;
8835 
8836 	wrqu->data.length = sizeof(*range);
8837 	memset(range, 0, sizeof(*range));
8838 
8839 	/* 54Mbs == ~27 Mb/s real (802.11g) */
8840 	range->throughput = 27 * 1000 * 1000;
8841 
8842 	range->max_qual.qual = 100;
8843 	/* TODO: Find real max RSSI and stick here */
8844 	range->max_qual.level = 0;
8845 	range->max_qual.noise = 0;
8846 	range->max_qual.updated = 7;	/* Updated all three */
8847 
8848 	range->avg_qual.qual = 70;
8849 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8850 	range->avg_qual.level = 0;	/* FIXME to real average level */
8851 	range->avg_qual.noise = 0;
8852 	range->avg_qual.updated = 7;	/* Updated all three */
8853 	mutex_lock(&priv->mutex);
8854 	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8855 
8856 	for (i = 0; i < range->num_bitrates; i++)
8857 		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8858 		    500000;
8859 
8860 	range->max_rts = DEFAULT_RTS_THRESHOLD;
8861 	range->min_frag = MIN_FRAG_THRESHOLD;
8862 	range->max_frag = MAX_FRAG_THRESHOLD;
8863 
8864 	range->encoding_size[0] = 5;
8865 	range->encoding_size[1] = 13;
8866 	range->num_encoding_sizes = 2;
8867 	range->max_encoding_tokens = WEP_KEYS;
8868 
8869 	/* Set the Wireless Extension versions */
8870 	range->we_version_compiled = WIRELESS_EXT;
8871 	range->we_version_source = 18;
8872 
8873 	i = 0;
8874 	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8875 		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8876 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8877 			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8878 				continue;
8879 
8880 			range->freq[i].i = geo->bg[j].channel;
8881 			range->freq[i].m = geo->bg[j].freq * 100000;
8882 			range->freq[i].e = 1;
8883 			i++;
8884 		}
8885 	}
8886 
8887 	if (priv->ieee->mode & IEEE_A) {
8888 		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8889 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8890 			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8891 				continue;
8892 
8893 			range->freq[i].i = geo->a[j].channel;
8894 			range->freq[i].m = geo->a[j].freq * 100000;
8895 			range->freq[i].e = 1;
8896 			i++;
8897 		}
8898 	}
8899 
8900 	range->num_channels = i;
8901 	range->num_frequency = i;
8902 
8903 	mutex_unlock(&priv->mutex);
8904 
8905 	/* Event capability (kernel + driver) */
8906 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8907 				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8908 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8909 				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8910 	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8911 
8912 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8913 		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8914 
8915 	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8916 
8917 	IPW_DEBUG_WX("GET Range\n");
8918 	return 0;
8919 }
8920 
8921 static int ipw_wx_set_wap(struct net_device *dev,
8922 			  struct iw_request_info *info,
8923 			  union iwreq_data *wrqu, char *extra)
8924 {
8925 	struct ipw_priv *priv = libipw_priv(dev);
8926 
8927 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8928 		return -EINVAL;
8929 	mutex_lock(&priv->mutex);
8930 	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8931 	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8932 		/* we disable mandatory BSSID association */
8933 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8934 		priv->config &= ~CFG_STATIC_BSSID;
8935 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8936 				"parameters.\n");
8937 		ipw_associate(priv);
8938 		mutex_unlock(&priv->mutex);
8939 		return 0;
8940 	}
8941 
8942 	priv->config |= CFG_STATIC_BSSID;
8943 	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8944 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8945 		mutex_unlock(&priv->mutex);
8946 		return 0;
8947 	}
8948 
8949 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8950 		     wrqu->ap_addr.sa_data);
8951 
8952 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8953 
8954 	/* Network configuration changed -- force [re]association */
8955 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8956 	if (!ipw_disassociate(priv))
8957 		ipw_associate(priv);
8958 
8959 	mutex_unlock(&priv->mutex);
8960 	return 0;
8961 }
8962 
8963 static int ipw_wx_get_wap(struct net_device *dev,
8964 			  struct iw_request_info *info,
8965 			  union iwreq_data *wrqu, char *extra)
8966 {
8967 	struct ipw_priv *priv = libipw_priv(dev);
8968 
8969 	/* If we are associated, trying to associate, or have a statically
8970 	 * configured BSSID then return that; otherwise return ANY */
8971 	mutex_lock(&priv->mutex);
8972 	if (priv->config & CFG_STATIC_BSSID ||
8973 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8974 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8975 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8976 	} else
8977 		eth_zero_addr(wrqu->ap_addr.sa_data);
8978 
8979 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8980 		     wrqu->ap_addr.sa_data);
8981 	mutex_unlock(&priv->mutex);
8982 	return 0;
8983 }
8984 
8985 static int ipw_wx_set_essid(struct net_device *dev,
8986 			    struct iw_request_info *info,
8987 			    union iwreq_data *wrqu, char *extra)
8988 {
8989 	struct ipw_priv *priv = libipw_priv(dev);
8990         int length;
8991 
8992         mutex_lock(&priv->mutex);
8993 
8994         if (!wrqu->essid.flags)
8995         {
8996                 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8997                 ipw_disassociate(priv);
8998                 priv->config &= ~CFG_STATIC_ESSID;
8999                 ipw_associate(priv);
9000                 mutex_unlock(&priv->mutex);
9001                 return 0;
9002         }
9003 
9004 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9005 
9006 	priv->config |= CFG_STATIC_ESSID;
9007 
9008 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9009 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9010 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9011 		mutex_unlock(&priv->mutex);
9012 		return 0;
9013 	}
9014 
9015 	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9016 
9017 	priv->essid_len = length;
9018 	memcpy(priv->essid, extra, priv->essid_len);
9019 
9020 	/* Network configuration changed -- force [re]association */
9021 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9022 	if (!ipw_disassociate(priv))
9023 		ipw_associate(priv);
9024 
9025 	mutex_unlock(&priv->mutex);
9026 	return 0;
9027 }
9028 
9029 static int ipw_wx_get_essid(struct net_device *dev,
9030 			    struct iw_request_info *info,
9031 			    union iwreq_data *wrqu, char *extra)
9032 {
9033 	struct ipw_priv *priv = libipw_priv(dev);
9034 
9035 	/* If we are associated, trying to associate, or have a statically
9036 	 * configured ESSID then return that; otherwise return ANY */
9037 	mutex_lock(&priv->mutex);
9038 	if (priv->config & CFG_STATIC_ESSID ||
9039 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9040 		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9041 			     priv->essid_len, priv->essid);
9042 		memcpy(extra, priv->essid, priv->essid_len);
9043 		wrqu->essid.length = priv->essid_len;
9044 		wrqu->essid.flags = 1;	/* active */
9045 	} else {
9046 		IPW_DEBUG_WX("Getting essid: ANY\n");
9047 		wrqu->essid.length = 0;
9048 		wrqu->essid.flags = 0;	/* active */
9049 	}
9050 	mutex_unlock(&priv->mutex);
9051 	return 0;
9052 }
9053 
9054 static int ipw_wx_set_nick(struct net_device *dev,
9055 			   struct iw_request_info *info,
9056 			   union iwreq_data *wrqu, char *extra)
9057 {
9058 	struct ipw_priv *priv = libipw_priv(dev);
9059 
9060 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9061 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9062 		return -E2BIG;
9063 	mutex_lock(&priv->mutex);
9064 	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9065 	memset(priv->nick, 0, sizeof(priv->nick));
9066 	memcpy(priv->nick, extra, wrqu->data.length);
9067 	IPW_DEBUG_TRACE("<<\n");
9068 	mutex_unlock(&priv->mutex);
9069 	return 0;
9070 
9071 }
9072 
9073 static int ipw_wx_get_nick(struct net_device *dev,
9074 			   struct iw_request_info *info,
9075 			   union iwreq_data *wrqu, char *extra)
9076 {
9077 	struct ipw_priv *priv = libipw_priv(dev);
9078 	IPW_DEBUG_WX("Getting nick\n");
9079 	mutex_lock(&priv->mutex);
9080 	wrqu->data.length = strlen(priv->nick);
9081 	memcpy(extra, priv->nick, wrqu->data.length);
9082 	wrqu->data.flags = 1;	/* active */
9083 	mutex_unlock(&priv->mutex);
9084 	return 0;
9085 }
9086 
9087 static int ipw_wx_set_sens(struct net_device *dev,
9088 			    struct iw_request_info *info,
9089 			    union iwreq_data *wrqu, char *extra)
9090 {
9091 	struct ipw_priv *priv = libipw_priv(dev);
9092 	int err = 0;
9093 
9094 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9095 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9096 	mutex_lock(&priv->mutex);
9097 
9098 	if (wrqu->sens.fixed == 0)
9099 	{
9100 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9101 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9102 		goto out;
9103 	}
9104 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9105 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9106 		err = -EINVAL;
9107 		goto out;
9108 	}
9109 
9110 	priv->roaming_threshold = wrqu->sens.value;
9111 	priv->disassociate_threshold = 3*wrqu->sens.value;
9112       out:
9113 	mutex_unlock(&priv->mutex);
9114 	return err;
9115 }
9116 
9117 static int ipw_wx_get_sens(struct net_device *dev,
9118 			    struct iw_request_info *info,
9119 			    union iwreq_data *wrqu, char *extra)
9120 {
9121 	struct ipw_priv *priv = libipw_priv(dev);
9122 	mutex_lock(&priv->mutex);
9123 	wrqu->sens.fixed = 1;
9124 	wrqu->sens.value = priv->roaming_threshold;
9125 	mutex_unlock(&priv->mutex);
9126 
9127 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9128 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9129 
9130 	return 0;
9131 }
9132 
9133 static int ipw_wx_set_rate(struct net_device *dev,
9134 			   struct iw_request_info *info,
9135 			   union iwreq_data *wrqu, char *extra)
9136 {
9137 	/* TODO: We should use semaphores or locks for access to priv */
9138 	struct ipw_priv *priv = libipw_priv(dev);
9139 	u32 target_rate = wrqu->bitrate.value;
9140 	u32 fixed, mask;
9141 
9142 	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9143 	/* value = X, fixed = 1 means only rate X */
9144 	/* value = X, fixed = 0 means all rates lower equal X */
9145 
9146 	if (target_rate == -1) {
9147 		fixed = 0;
9148 		mask = LIBIPW_DEFAULT_RATES_MASK;
9149 		/* Now we should reassociate */
9150 		goto apply;
9151 	}
9152 
9153 	mask = 0;
9154 	fixed = wrqu->bitrate.fixed;
9155 
9156 	if (target_rate == 1000000 || !fixed)
9157 		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9158 	if (target_rate == 1000000)
9159 		goto apply;
9160 
9161 	if (target_rate == 2000000 || !fixed)
9162 		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9163 	if (target_rate == 2000000)
9164 		goto apply;
9165 
9166 	if (target_rate == 5500000 || !fixed)
9167 		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9168 	if (target_rate == 5500000)
9169 		goto apply;
9170 
9171 	if (target_rate == 6000000 || !fixed)
9172 		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9173 	if (target_rate == 6000000)
9174 		goto apply;
9175 
9176 	if (target_rate == 9000000 || !fixed)
9177 		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9178 	if (target_rate == 9000000)
9179 		goto apply;
9180 
9181 	if (target_rate == 11000000 || !fixed)
9182 		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9183 	if (target_rate == 11000000)
9184 		goto apply;
9185 
9186 	if (target_rate == 12000000 || !fixed)
9187 		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9188 	if (target_rate == 12000000)
9189 		goto apply;
9190 
9191 	if (target_rate == 18000000 || !fixed)
9192 		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9193 	if (target_rate == 18000000)
9194 		goto apply;
9195 
9196 	if (target_rate == 24000000 || !fixed)
9197 		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9198 	if (target_rate == 24000000)
9199 		goto apply;
9200 
9201 	if (target_rate == 36000000 || !fixed)
9202 		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9203 	if (target_rate == 36000000)
9204 		goto apply;
9205 
9206 	if (target_rate == 48000000 || !fixed)
9207 		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9208 	if (target_rate == 48000000)
9209 		goto apply;
9210 
9211 	if (target_rate == 54000000 || !fixed)
9212 		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9213 	if (target_rate == 54000000)
9214 		goto apply;
9215 
9216 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9217 	return -EINVAL;
9218 
9219       apply:
9220 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9221 		     mask, fixed ? "fixed" : "sub-rates");
9222 	mutex_lock(&priv->mutex);
9223 	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9224 		priv->config &= ~CFG_FIXED_RATE;
9225 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9226 	} else
9227 		priv->config |= CFG_FIXED_RATE;
9228 
9229 	if (priv->rates_mask == mask) {
9230 		IPW_DEBUG_WX("Mask set to current mask.\n");
9231 		mutex_unlock(&priv->mutex);
9232 		return 0;
9233 	}
9234 
9235 	priv->rates_mask = mask;
9236 
9237 	/* Network configuration changed -- force [re]association */
9238 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9239 	if (!ipw_disassociate(priv))
9240 		ipw_associate(priv);
9241 
9242 	mutex_unlock(&priv->mutex);
9243 	return 0;
9244 }
9245 
9246 static int ipw_wx_get_rate(struct net_device *dev,
9247 			   struct iw_request_info *info,
9248 			   union iwreq_data *wrqu, char *extra)
9249 {
9250 	struct ipw_priv *priv = libipw_priv(dev);
9251 	mutex_lock(&priv->mutex);
9252 	wrqu->bitrate.value = priv->last_rate;
9253 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9254 	mutex_unlock(&priv->mutex);
9255 	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9256 	return 0;
9257 }
9258 
9259 static int ipw_wx_set_rts(struct net_device *dev,
9260 			  struct iw_request_info *info,
9261 			  union iwreq_data *wrqu, char *extra)
9262 {
9263 	struct ipw_priv *priv = libipw_priv(dev);
9264 	mutex_lock(&priv->mutex);
9265 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9266 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9267 	else {
9268 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9269 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9270 			mutex_unlock(&priv->mutex);
9271 			return -EINVAL;
9272 		}
9273 		priv->rts_threshold = wrqu->rts.value;
9274 	}
9275 
9276 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9277 	mutex_unlock(&priv->mutex);
9278 	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9279 	return 0;
9280 }
9281 
9282 static int ipw_wx_get_rts(struct net_device *dev,
9283 			  struct iw_request_info *info,
9284 			  union iwreq_data *wrqu, char *extra)
9285 {
9286 	struct ipw_priv *priv = libipw_priv(dev);
9287 	mutex_lock(&priv->mutex);
9288 	wrqu->rts.value = priv->rts_threshold;
9289 	wrqu->rts.fixed = 0;	/* no auto select */
9290 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9291 	mutex_unlock(&priv->mutex);
9292 	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9293 	return 0;
9294 }
9295 
9296 static int ipw_wx_set_txpow(struct net_device *dev,
9297 			    struct iw_request_info *info,
9298 			    union iwreq_data *wrqu, char *extra)
9299 {
9300 	struct ipw_priv *priv = libipw_priv(dev);
9301 	int err = 0;
9302 
9303 	mutex_lock(&priv->mutex);
9304 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9305 		err = -EINPROGRESS;
9306 		goto out;
9307 	}
9308 
9309 	if (!wrqu->power.fixed)
9310 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9311 
9312 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9313 		err = -EINVAL;
9314 		goto out;
9315 	}
9316 
9317 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9318 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9319 		err = -EINVAL;
9320 		goto out;
9321 	}
9322 
9323 	priv->tx_power = wrqu->power.value;
9324 	err = ipw_set_tx_power(priv);
9325       out:
9326 	mutex_unlock(&priv->mutex);
9327 	return err;
9328 }
9329 
9330 static int ipw_wx_get_txpow(struct net_device *dev,
9331 			    struct iw_request_info *info,
9332 			    union iwreq_data *wrqu, char *extra)
9333 {
9334 	struct ipw_priv *priv = libipw_priv(dev);
9335 	mutex_lock(&priv->mutex);
9336 	wrqu->power.value = priv->tx_power;
9337 	wrqu->power.fixed = 1;
9338 	wrqu->power.flags = IW_TXPOW_DBM;
9339 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9340 	mutex_unlock(&priv->mutex);
9341 
9342 	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9343 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9344 
9345 	return 0;
9346 }
9347 
9348 static int ipw_wx_set_frag(struct net_device *dev,
9349 			   struct iw_request_info *info,
9350 			   union iwreq_data *wrqu, char *extra)
9351 {
9352 	struct ipw_priv *priv = libipw_priv(dev);
9353 	mutex_lock(&priv->mutex);
9354 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9355 		priv->ieee->fts = DEFAULT_FTS;
9356 	else {
9357 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9358 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9359 			mutex_unlock(&priv->mutex);
9360 			return -EINVAL;
9361 		}
9362 
9363 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9364 	}
9365 
9366 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9367 	mutex_unlock(&priv->mutex);
9368 	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9369 	return 0;
9370 }
9371 
9372 static int ipw_wx_get_frag(struct net_device *dev,
9373 			   struct iw_request_info *info,
9374 			   union iwreq_data *wrqu, char *extra)
9375 {
9376 	struct ipw_priv *priv = libipw_priv(dev);
9377 	mutex_lock(&priv->mutex);
9378 	wrqu->frag.value = priv->ieee->fts;
9379 	wrqu->frag.fixed = 0;	/* no auto select */
9380 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9381 	mutex_unlock(&priv->mutex);
9382 	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9383 
9384 	return 0;
9385 }
9386 
9387 static int ipw_wx_set_retry(struct net_device *dev,
9388 			    struct iw_request_info *info,
9389 			    union iwreq_data *wrqu, char *extra)
9390 {
9391 	struct ipw_priv *priv = libipw_priv(dev);
9392 
9393 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9394 		return -EINVAL;
9395 
9396 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9397 		return 0;
9398 
9399 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9400 		return -EINVAL;
9401 
9402 	mutex_lock(&priv->mutex);
9403 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9404 		priv->short_retry_limit = (u8) wrqu->retry.value;
9405 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9406 		priv->long_retry_limit = (u8) wrqu->retry.value;
9407 	else {
9408 		priv->short_retry_limit = (u8) wrqu->retry.value;
9409 		priv->long_retry_limit = (u8) wrqu->retry.value;
9410 	}
9411 
9412 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9413 			     priv->long_retry_limit);
9414 	mutex_unlock(&priv->mutex);
9415 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9416 		     priv->short_retry_limit, priv->long_retry_limit);
9417 	return 0;
9418 }
9419 
9420 static int ipw_wx_get_retry(struct net_device *dev,
9421 			    struct iw_request_info *info,
9422 			    union iwreq_data *wrqu, char *extra)
9423 {
9424 	struct ipw_priv *priv = libipw_priv(dev);
9425 
9426 	mutex_lock(&priv->mutex);
9427 	wrqu->retry.disabled = 0;
9428 
9429 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9430 		mutex_unlock(&priv->mutex);
9431 		return -EINVAL;
9432 	}
9433 
9434 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9435 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9436 		wrqu->retry.value = priv->long_retry_limit;
9437 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9438 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9439 		wrqu->retry.value = priv->short_retry_limit;
9440 	} else {
9441 		wrqu->retry.flags = IW_RETRY_LIMIT;
9442 		wrqu->retry.value = priv->short_retry_limit;
9443 	}
9444 	mutex_unlock(&priv->mutex);
9445 
9446 	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9447 
9448 	return 0;
9449 }
9450 
9451 static int ipw_wx_set_scan(struct net_device *dev,
9452 			   struct iw_request_info *info,
9453 			   union iwreq_data *wrqu, char *extra)
9454 {
9455 	struct ipw_priv *priv = libipw_priv(dev);
9456 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9457 	struct delayed_work *work = NULL;
9458 
9459 	mutex_lock(&priv->mutex);
9460 
9461 	priv->user_requested_scan = 1;
9462 
9463 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9464 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9465 			int len = min((int)req->essid_len,
9466 			              (int)sizeof(priv->direct_scan_ssid));
9467 			memcpy(priv->direct_scan_ssid, req->essid, len);
9468 			priv->direct_scan_ssid_len = len;
9469 			work = &priv->request_direct_scan;
9470 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9471 			work = &priv->request_passive_scan;
9472 		}
9473 	} else {
9474 		/* Normal active broadcast scan */
9475 		work = &priv->request_scan;
9476 	}
9477 
9478 	mutex_unlock(&priv->mutex);
9479 
9480 	IPW_DEBUG_WX("Start scan\n");
9481 
9482 	schedule_delayed_work(work, 0);
9483 
9484 	return 0;
9485 }
9486 
9487 static int ipw_wx_get_scan(struct net_device *dev,
9488 			   struct iw_request_info *info,
9489 			   union iwreq_data *wrqu, char *extra)
9490 {
9491 	struct ipw_priv *priv = libipw_priv(dev);
9492 	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9493 }
9494 
9495 static int ipw_wx_set_encode(struct net_device *dev,
9496 			     struct iw_request_info *info,
9497 			     union iwreq_data *wrqu, char *key)
9498 {
9499 	struct ipw_priv *priv = libipw_priv(dev);
9500 	int ret;
9501 	u32 cap = priv->capability;
9502 
9503 	mutex_lock(&priv->mutex);
9504 	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9505 
9506 	/* In IBSS mode, we need to notify the firmware to update
9507 	 * the beacon info after we changed the capability. */
9508 	if (cap != priv->capability &&
9509 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9510 	    priv->status & STATUS_ASSOCIATED)
9511 		ipw_disassociate(priv);
9512 
9513 	mutex_unlock(&priv->mutex);
9514 	return ret;
9515 }
9516 
9517 static int ipw_wx_get_encode(struct net_device *dev,
9518 			     struct iw_request_info *info,
9519 			     union iwreq_data *wrqu, char *key)
9520 {
9521 	struct ipw_priv *priv = libipw_priv(dev);
9522 	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9523 }
9524 
9525 static int ipw_wx_set_power(struct net_device *dev,
9526 			    struct iw_request_info *info,
9527 			    union iwreq_data *wrqu, char *extra)
9528 {
9529 	struct ipw_priv *priv = libipw_priv(dev);
9530 	int err;
9531 	mutex_lock(&priv->mutex);
9532 	if (wrqu->power.disabled) {
9533 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9534 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9535 		if (err) {
9536 			IPW_DEBUG_WX("failed setting power mode.\n");
9537 			mutex_unlock(&priv->mutex);
9538 			return err;
9539 		}
9540 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9541 		mutex_unlock(&priv->mutex);
9542 		return 0;
9543 	}
9544 
9545 	switch (wrqu->power.flags & IW_POWER_MODE) {
9546 	case IW_POWER_ON:	/* If not specified */
9547 	case IW_POWER_MODE:	/* If set all mask */
9548 	case IW_POWER_ALL_R:	/* If explicitly state all */
9549 		break;
9550 	default:		/* Otherwise we don't support it */
9551 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9552 			     wrqu->power.flags);
9553 		mutex_unlock(&priv->mutex);
9554 		return -EOPNOTSUPP;
9555 	}
9556 
9557 	/* If the user hasn't specified a power management mode yet, default
9558 	 * to BATTERY */
9559 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9560 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9561 	else
9562 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9563 
9564 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9565 	if (err) {
9566 		IPW_DEBUG_WX("failed setting power mode.\n");
9567 		mutex_unlock(&priv->mutex);
9568 		return err;
9569 	}
9570 
9571 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9572 	mutex_unlock(&priv->mutex);
9573 	return 0;
9574 }
9575 
9576 static int ipw_wx_get_power(struct net_device *dev,
9577 			    struct iw_request_info *info,
9578 			    union iwreq_data *wrqu, char *extra)
9579 {
9580 	struct ipw_priv *priv = libipw_priv(dev);
9581 	mutex_lock(&priv->mutex);
9582 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9583 		wrqu->power.disabled = 1;
9584 	else
9585 		wrqu->power.disabled = 0;
9586 
9587 	mutex_unlock(&priv->mutex);
9588 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9589 
9590 	return 0;
9591 }
9592 
9593 static int ipw_wx_set_powermode(struct net_device *dev,
9594 				struct iw_request_info *info,
9595 				union iwreq_data *wrqu, char *extra)
9596 {
9597 	struct ipw_priv *priv = libipw_priv(dev);
9598 	int mode = *(int *)extra;
9599 	int err;
9600 
9601 	mutex_lock(&priv->mutex);
9602 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9603 		mode = IPW_POWER_AC;
9604 
9605 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9606 		err = ipw_send_power_mode(priv, mode);
9607 		if (err) {
9608 			IPW_DEBUG_WX("failed setting power mode.\n");
9609 			mutex_unlock(&priv->mutex);
9610 			return err;
9611 		}
9612 		priv->power_mode = IPW_POWER_ENABLED | mode;
9613 	}
9614 	mutex_unlock(&priv->mutex);
9615 	return 0;
9616 }
9617 
9618 #define MAX_WX_STRING 80
9619 static int ipw_wx_get_powermode(struct net_device *dev,
9620 				struct iw_request_info *info,
9621 				union iwreq_data *wrqu, char *extra)
9622 {
9623 	struct ipw_priv *priv = libipw_priv(dev);
9624 	int level = IPW_POWER_LEVEL(priv->power_mode);
9625 	char *p = extra;
9626 
9627 	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9628 
9629 	switch (level) {
9630 	case IPW_POWER_AC:
9631 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9632 		break;
9633 	case IPW_POWER_BATTERY:
9634 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9635 		break;
9636 	default:
9637 		p += snprintf(p, MAX_WX_STRING - (p - extra),
9638 			      "(Timeout %dms, Period %dms)",
9639 			      timeout_duration[level - 1] / 1000,
9640 			      period_duration[level - 1] / 1000);
9641 	}
9642 
9643 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9644 		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9645 
9646 	wrqu->data.length = p - extra + 1;
9647 
9648 	return 0;
9649 }
9650 
9651 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9652 				    struct iw_request_info *info,
9653 				    union iwreq_data *wrqu, char *extra)
9654 {
9655 	struct ipw_priv *priv = libipw_priv(dev);
9656 	int mode = *(int *)extra;
9657 	u8 band = 0, modulation = 0;
9658 
9659 	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9660 		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9661 		return -EINVAL;
9662 	}
9663 	mutex_lock(&priv->mutex);
9664 	if (priv->adapter == IPW_2915ABG) {
9665 		priv->ieee->abg_true = 1;
9666 		if (mode & IEEE_A) {
9667 			band |= LIBIPW_52GHZ_BAND;
9668 			modulation |= LIBIPW_OFDM_MODULATION;
9669 		} else
9670 			priv->ieee->abg_true = 0;
9671 	} else {
9672 		if (mode & IEEE_A) {
9673 			IPW_WARNING("Attempt to set 2200BG into "
9674 				    "802.11a mode\n");
9675 			mutex_unlock(&priv->mutex);
9676 			return -EINVAL;
9677 		}
9678 
9679 		priv->ieee->abg_true = 0;
9680 	}
9681 
9682 	if (mode & IEEE_B) {
9683 		band |= LIBIPW_24GHZ_BAND;
9684 		modulation |= LIBIPW_CCK_MODULATION;
9685 	} else
9686 		priv->ieee->abg_true = 0;
9687 
9688 	if (mode & IEEE_G) {
9689 		band |= LIBIPW_24GHZ_BAND;
9690 		modulation |= LIBIPW_OFDM_MODULATION;
9691 	} else
9692 		priv->ieee->abg_true = 0;
9693 
9694 	priv->ieee->mode = mode;
9695 	priv->ieee->freq_band = band;
9696 	priv->ieee->modulation = modulation;
9697 	init_supported_rates(priv, &priv->rates);
9698 
9699 	/* Network configuration changed -- force [re]association */
9700 	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9701 	if (!ipw_disassociate(priv)) {
9702 		ipw_send_supported_rates(priv, &priv->rates);
9703 		ipw_associate(priv);
9704 	}
9705 
9706 	/* Update the band LEDs */
9707 	ipw_led_band_on(priv);
9708 
9709 	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9710 		     mode & IEEE_A ? 'a' : '.',
9711 		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9712 	mutex_unlock(&priv->mutex);
9713 	return 0;
9714 }
9715 
9716 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9717 				    struct iw_request_info *info,
9718 				    union iwreq_data *wrqu, char *extra)
9719 {
9720 	struct ipw_priv *priv = libipw_priv(dev);
9721 	mutex_lock(&priv->mutex);
9722 	switch (priv->ieee->mode) {
9723 	case IEEE_A:
9724 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9725 		break;
9726 	case IEEE_B:
9727 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9728 		break;
9729 	case IEEE_A | IEEE_B:
9730 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9731 		break;
9732 	case IEEE_G:
9733 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9734 		break;
9735 	case IEEE_A | IEEE_G:
9736 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9737 		break;
9738 	case IEEE_B | IEEE_G:
9739 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9740 		break;
9741 	case IEEE_A | IEEE_B | IEEE_G:
9742 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9743 		break;
9744 	default:
9745 		strncpy(extra, "unknown", MAX_WX_STRING);
9746 		break;
9747 	}
9748 	extra[MAX_WX_STRING - 1] = '\0';
9749 
9750 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9751 
9752 	wrqu->data.length = strlen(extra) + 1;
9753 	mutex_unlock(&priv->mutex);
9754 
9755 	return 0;
9756 }
9757 
9758 static int ipw_wx_set_preamble(struct net_device *dev,
9759 			       struct iw_request_info *info,
9760 			       union iwreq_data *wrqu, char *extra)
9761 {
9762 	struct ipw_priv *priv = libipw_priv(dev);
9763 	int mode = *(int *)extra;
9764 	mutex_lock(&priv->mutex);
9765 	/* Switching from SHORT -> LONG requires a disassociation */
9766 	if (mode == 1) {
9767 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9768 			priv->config |= CFG_PREAMBLE_LONG;
9769 
9770 			/* Network configuration changed -- force [re]association */
9771 			IPW_DEBUG_ASSOC
9772 			    ("[re]association triggered due to preamble change.\n");
9773 			if (!ipw_disassociate(priv))
9774 				ipw_associate(priv);
9775 		}
9776 		goto done;
9777 	}
9778 
9779 	if (mode == 0) {
9780 		priv->config &= ~CFG_PREAMBLE_LONG;
9781 		goto done;
9782 	}
9783 	mutex_unlock(&priv->mutex);
9784 	return -EINVAL;
9785 
9786       done:
9787 	mutex_unlock(&priv->mutex);
9788 	return 0;
9789 }
9790 
9791 static int ipw_wx_get_preamble(struct net_device *dev,
9792 			       struct iw_request_info *info,
9793 			       union iwreq_data *wrqu, char *extra)
9794 {
9795 	struct ipw_priv *priv = libipw_priv(dev);
9796 	mutex_lock(&priv->mutex);
9797 	if (priv->config & CFG_PREAMBLE_LONG)
9798 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9799 	else
9800 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9801 	mutex_unlock(&priv->mutex);
9802 	return 0;
9803 }
9804 
9805 #ifdef CONFIG_IPW2200_MONITOR
9806 static int ipw_wx_set_monitor(struct net_device *dev,
9807 			      struct iw_request_info *info,
9808 			      union iwreq_data *wrqu, char *extra)
9809 {
9810 	struct ipw_priv *priv = libipw_priv(dev);
9811 	int *parms = (int *)extra;
9812 	int enable = (parms[0] > 0);
9813 	mutex_lock(&priv->mutex);
9814 	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9815 	if (enable) {
9816 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9817 #ifdef CONFIG_IPW2200_RADIOTAP
9818 			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9819 #else
9820 			priv->net_dev->type = ARPHRD_IEEE80211;
9821 #endif
9822 			schedule_work(&priv->adapter_restart);
9823 		}
9824 
9825 		ipw_set_channel(priv, parms[1]);
9826 	} else {
9827 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9828 			mutex_unlock(&priv->mutex);
9829 			return 0;
9830 		}
9831 		priv->net_dev->type = ARPHRD_ETHER;
9832 		schedule_work(&priv->adapter_restart);
9833 	}
9834 	mutex_unlock(&priv->mutex);
9835 	return 0;
9836 }
9837 
9838 #endif				/* CONFIG_IPW2200_MONITOR */
9839 
9840 static int ipw_wx_reset(struct net_device *dev,
9841 			struct iw_request_info *info,
9842 			union iwreq_data *wrqu, char *extra)
9843 {
9844 	struct ipw_priv *priv = libipw_priv(dev);
9845 	IPW_DEBUG_WX("RESET\n");
9846 	schedule_work(&priv->adapter_restart);
9847 	return 0;
9848 }
9849 
9850 static int ipw_wx_sw_reset(struct net_device *dev,
9851 			   struct iw_request_info *info,
9852 			   union iwreq_data *wrqu, char *extra)
9853 {
9854 	struct ipw_priv *priv = libipw_priv(dev);
9855 	union iwreq_data wrqu_sec = {
9856 		.encoding = {
9857 			     .flags = IW_ENCODE_DISABLED,
9858 			     },
9859 	};
9860 	int ret;
9861 
9862 	IPW_DEBUG_WX("SW_RESET\n");
9863 
9864 	mutex_lock(&priv->mutex);
9865 
9866 	ret = ipw_sw_reset(priv, 2);
9867 	if (!ret) {
9868 		free_firmware();
9869 		ipw_adapter_restart(priv);
9870 	}
9871 
9872 	/* The SW reset bit might have been toggled on by the 'disable'
9873 	 * module parameter, so take appropriate action */
9874 	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9875 
9876 	mutex_unlock(&priv->mutex);
9877 	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9878 	mutex_lock(&priv->mutex);
9879 
9880 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9881 		/* Configuration likely changed -- force [re]association */
9882 		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9883 				"reset.\n");
9884 		if (!ipw_disassociate(priv))
9885 			ipw_associate(priv);
9886 	}
9887 
9888 	mutex_unlock(&priv->mutex);
9889 
9890 	return 0;
9891 }
9892 
9893 /* Rebase the WE IOCTLs to zero for the handler array */
9894 static iw_handler ipw_wx_handlers[] = {
9895 	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9896 	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9897 	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9898 	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9899 	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9900 	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9901 	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9902 	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9903 	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9904 	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9905 	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9906 	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9907 	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9908 	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9909 	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9910 	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9911 	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9912 	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9913 	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9914 	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9915 	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9916 	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9917 	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9918 	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9919 	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9920 	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9921 	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9922 	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9923 	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9924 	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9925 	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9926 	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9927 	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9928 	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9929 	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9930 	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9931 	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9932 	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9933 	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9934 	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9935 	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9936 };
9937 
9938 enum {
9939 	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9940 	IPW_PRIV_GET_POWER,
9941 	IPW_PRIV_SET_MODE,
9942 	IPW_PRIV_GET_MODE,
9943 	IPW_PRIV_SET_PREAMBLE,
9944 	IPW_PRIV_GET_PREAMBLE,
9945 	IPW_PRIV_RESET,
9946 	IPW_PRIV_SW_RESET,
9947 #ifdef CONFIG_IPW2200_MONITOR
9948 	IPW_PRIV_SET_MONITOR,
9949 #endif
9950 };
9951 
9952 static struct iw_priv_args ipw_priv_args[] = {
9953 	{
9954 	 .cmd = IPW_PRIV_SET_POWER,
9955 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9956 	 .name = "set_power"},
9957 	{
9958 	 .cmd = IPW_PRIV_GET_POWER,
9959 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9960 	 .name = "get_power"},
9961 	{
9962 	 .cmd = IPW_PRIV_SET_MODE,
9963 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9964 	 .name = "set_mode"},
9965 	{
9966 	 .cmd = IPW_PRIV_GET_MODE,
9967 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9968 	 .name = "get_mode"},
9969 	{
9970 	 .cmd = IPW_PRIV_SET_PREAMBLE,
9971 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9972 	 .name = "set_preamble"},
9973 	{
9974 	 .cmd = IPW_PRIV_GET_PREAMBLE,
9975 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9976 	 .name = "get_preamble"},
9977 	{
9978 	 IPW_PRIV_RESET,
9979 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9980 	{
9981 	 IPW_PRIV_SW_RESET,
9982 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9983 #ifdef CONFIG_IPW2200_MONITOR
9984 	{
9985 	 IPW_PRIV_SET_MONITOR,
9986 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9987 #endif				/* CONFIG_IPW2200_MONITOR */
9988 };
9989 
9990 static iw_handler ipw_priv_handler[] = {
9991 	ipw_wx_set_powermode,
9992 	ipw_wx_get_powermode,
9993 	ipw_wx_set_wireless_mode,
9994 	ipw_wx_get_wireless_mode,
9995 	ipw_wx_set_preamble,
9996 	ipw_wx_get_preamble,
9997 	ipw_wx_reset,
9998 	ipw_wx_sw_reset,
9999 #ifdef CONFIG_IPW2200_MONITOR
10000 	ipw_wx_set_monitor,
10001 #endif
10002 };
10003 
10004 static const struct iw_handler_def ipw_wx_handler_def = {
10005 	.standard = ipw_wx_handlers,
10006 	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10007 	.num_private = ARRAY_SIZE(ipw_priv_handler),
10008 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10009 	.private = ipw_priv_handler,
10010 	.private_args = ipw_priv_args,
10011 	.get_wireless_stats = ipw_get_wireless_stats,
10012 };
10013 
10014 /*
10015  * Get wireless statistics.
10016  * Called by /proc/net/wireless
10017  * Also called by SIOCGIWSTATS
10018  */
10019 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10020 {
10021 	struct ipw_priv *priv = libipw_priv(dev);
10022 	struct iw_statistics *wstats;
10023 
10024 	wstats = &priv->wstats;
10025 
10026 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10027 	 * netdev->get_wireless_stats seems to be called before fw is
10028 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10029 	 * and associated; if not associcated, the values are all meaningless
10030 	 * anyway, so set them all to NULL and INVALID */
10031 	if (!(priv->status & STATUS_ASSOCIATED)) {
10032 		wstats->miss.beacon = 0;
10033 		wstats->discard.retries = 0;
10034 		wstats->qual.qual = 0;
10035 		wstats->qual.level = 0;
10036 		wstats->qual.noise = 0;
10037 		wstats->qual.updated = 7;
10038 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10039 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10040 		return wstats;
10041 	}
10042 
10043 	wstats->qual.qual = priv->quality;
10044 	wstats->qual.level = priv->exp_avg_rssi;
10045 	wstats->qual.noise = priv->exp_avg_noise;
10046 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10047 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10048 
10049 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10050 	wstats->discard.retries = priv->last_tx_failures;
10051 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10052 
10053 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10054 	goto fail_get_ordinal;
10055 	wstats->discard.retries += tx_retry; */
10056 
10057 	return wstats;
10058 }
10059 
10060 /* net device stuff */
10061 
10062 static  void init_sys_config(struct ipw_sys_config *sys_config)
10063 {
10064 	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10065 	sys_config->bt_coexistence = 0;
10066 	sys_config->answer_broadcast_ssid_probe = 0;
10067 	sys_config->accept_all_data_frames = 0;
10068 	sys_config->accept_non_directed_frames = 1;
10069 	sys_config->exclude_unicast_unencrypted = 0;
10070 	sys_config->disable_unicast_decryption = 1;
10071 	sys_config->exclude_multicast_unencrypted = 0;
10072 	sys_config->disable_multicast_decryption = 1;
10073 	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10074 		antenna = CFG_SYS_ANTENNA_BOTH;
10075 	sys_config->antenna_diversity = antenna;
10076 	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10077 	sys_config->dot11g_auto_detection = 0;
10078 	sys_config->enable_cts_to_self = 0;
10079 	sys_config->bt_coexist_collision_thr = 0;
10080 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10081 	sys_config->silence_threshold = 0x1e;
10082 }
10083 
10084 static int ipw_net_open(struct net_device *dev)
10085 {
10086 	IPW_DEBUG_INFO("dev->open\n");
10087 	netif_start_queue(dev);
10088 	return 0;
10089 }
10090 
10091 static int ipw_net_stop(struct net_device *dev)
10092 {
10093 	IPW_DEBUG_INFO("dev->close\n");
10094 	netif_stop_queue(dev);
10095 	return 0;
10096 }
10097 
10098 /*
10099 todo:
10100 
10101 modify to send one tfd per fragment instead of using chunking.  otherwise
10102 we need to heavily modify the libipw_skb_to_txb.
10103 */
10104 
10105 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10106 			     int pri)
10107 {
10108 	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10109 	    txb->fragments[0]->data;
10110 	int i = 0;
10111 	struct tfd_frame *tfd;
10112 #ifdef CONFIG_IPW2200_QOS
10113 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10114 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10115 #else
10116 	struct clx2_tx_queue *txq = &priv->txq[0];
10117 #endif
10118 	struct clx2_queue *q = &txq->q;
10119 	u8 id, hdr_len, unicast;
10120 	int fc;
10121 
10122 	if (!(priv->status & STATUS_ASSOCIATED))
10123 		goto drop;
10124 
10125 	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10126 	switch (priv->ieee->iw_mode) {
10127 	case IW_MODE_ADHOC:
10128 		unicast = !is_multicast_ether_addr(hdr->addr1);
10129 		id = ipw_find_station(priv, hdr->addr1);
10130 		if (id == IPW_INVALID_STATION) {
10131 			id = ipw_add_station(priv, hdr->addr1);
10132 			if (id == IPW_INVALID_STATION) {
10133 				IPW_WARNING("Attempt to send data to "
10134 					    "invalid cell: %pM\n",
10135 					    hdr->addr1);
10136 				goto drop;
10137 			}
10138 		}
10139 		break;
10140 
10141 	case IW_MODE_INFRA:
10142 	default:
10143 		unicast = !is_multicast_ether_addr(hdr->addr3);
10144 		id = 0;
10145 		break;
10146 	}
10147 
10148 	tfd = &txq->bd[q->first_empty];
10149 	txq->txb[q->first_empty] = txb;
10150 	memset(tfd, 0, sizeof(*tfd));
10151 	tfd->u.data.station_number = id;
10152 
10153 	tfd->control_flags.message_type = TX_FRAME_TYPE;
10154 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10155 
10156 	tfd->u.data.cmd_id = DINO_CMD_TX;
10157 	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10158 
10159 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10160 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10161 	else
10162 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10163 
10164 	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10165 		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10166 
10167 	fc = le16_to_cpu(hdr->frame_ctl);
10168 	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10169 
10170 	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10171 
10172 	if (likely(unicast))
10173 		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10174 
10175 	if (txb->encrypted && !priv->ieee->host_encrypt) {
10176 		switch (priv->ieee->sec.level) {
10177 		case SEC_LEVEL_3:
10178 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10179 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10180 			/* XXX: ACK flag must be set for CCMP even if it
10181 			 * is a multicast/broadcast packet, because CCMP
10182 			 * group communication encrypted by GTK is
10183 			 * actually done by the AP. */
10184 			if (!unicast)
10185 				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10186 
10187 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10188 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10189 			tfd->u.data.key_index = 0;
10190 			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10191 			break;
10192 		case SEC_LEVEL_2:
10193 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10194 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10195 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10196 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10197 			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10198 			break;
10199 		case SEC_LEVEL_1:
10200 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10201 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10202 			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10203 			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10204 			    40)
10205 				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10206 			else
10207 				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10208 			break;
10209 		case SEC_LEVEL_0:
10210 			break;
10211 		default:
10212 			printk(KERN_ERR "Unknown security level %d\n",
10213 			       priv->ieee->sec.level);
10214 			break;
10215 		}
10216 	} else
10217 		/* No hardware encryption */
10218 		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10219 
10220 #ifdef CONFIG_IPW2200_QOS
10221 	if (fc & IEEE80211_STYPE_QOS_DATA)
10222 		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10223 #endif				/* CONFIG_IPW2200_QOS */
10224 
10225 	/* payload */
10226 	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10227 						 txb->nr_frags));
10228 	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10229 		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10230 	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10231 		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10232 			       i, le32_to_cpu(tfd->u.data.num_chunks),
10233 			       txb->fragments[i]->len - hdr_len);
10234 		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10235 			     i, tfd->u.data.num_chunks,
10236 			     txb->fragments[i]->len - hdr_len);
10237 		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10238 			   txb->fragments[i]->len - hdr_len);
10239 
10240 		tfd->u.data.chunk_ptr[i] =
10241 		    cpu_to_le32(pci_map_single
10242 				(priv->pci_dev,
10243 				 txb->fragments[i]->data + hdr_len,
10244 				 txb->fragments[i]->len - hdr_len,
10245 				 PCI_DMA_TODEVICE));
10246 		tfd->u.data.chunk_len[i] =
10247 		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10248 	}
10249 
10250 	if (i != txb->nr_frags) {
10251 		struct sk_buff *skb;
10252 		u16 remaining_bytes = 0;
10253 		int j;
10254 
10255 		for (j = i; j < txb->nr_frags; j++)
10256 			remaining_bytes += txb->fragments[j]->len - hdr_len;
10257 
10258 		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10259 		       remaining_bytes);
10260 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10261 		if (skb != NULL) {
10262 			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10263 			for (j = i; j < txb->nr_frags; j++) {
10264 				int size = txb->fragments[j]->len - hdr_len;
10265 
10266 				printk(KERN_INFO "Adding frag %d %d...\n",
10267 				       j, size);
10268 				skb_put_data(skb,
10269 					     txb->fragments[j]->data + hdr_len,
10270 					     size);
10271 			}
10272 			dev_kfree_skb_any(txb->fragments[i]);
10273 			txb->fragments[i] = skb;
10274 			tfd->u.data.chunk_ptr[i] =
10275 			    cpu_to_le32(pci_map_single
10276 					(priv->pci_dev, skb->data,
10277 					 remaining_bytes,
10278 					 PCI_DMA_TODEVICE));
10279 
10280 			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10281 		}
10282 	}
10283 
10284 	/* kick DMA */
10285 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10286 	ipw_write32(priv, q->reg_w, q->first_empty);
10287 
10288 	if (ipw_tx_queue_space(q) < q->high_mark)
10289 		netif_stop_queue(priv->net_dev);
10290 
10291 	return NETDEV_TX_OK;
10292 
10293       drop:
10294 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10295 	libipw_txb_free(txb);
10296 	return NETDEV_TX_OK;
10297 }
10298 
10299 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10300 {
10301 	struct ipw_priv *priv = libipw_priv(dev);
10302 #ifdef CONFIG_IPW2200_QOS
10303 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10304 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10305 #else
10306 	struct clx2_tx_queue *txq = &priv->txq[0];
10307 #endif				/* CONFIG_IPW2200_QOS */
10308 
10309 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10310 		return 1;
10311 
10312 	return 0;
10313 }
10314 
10315 #ifdef CONFIG_IPW2200_PROMISCUOUS
10316 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10317 				      struct libipw_txb *txb)
10318 {
10319 	struct libipw_rx_stats dummystats;
10320 	struct ieee80211_hdr *hdr;
10321 	u8 n;
10322 	u16 filter = priv->prom_priv->filter;
10323 	int hdr_only = 0;
10324 
10325 	if (filter & IPW_PROM_NO_TX)
10326 		return;
10327 
10328 	memset(&dummystats, 0, sizeof(dummystats));
10329 
10330 	/* Filtering of fragment chains is done against the first fragment */
10331 	hdr = (void *)txb->fragments[0]->data;
10332 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10333 		if (filter & IPW_PROM_NO_MGMT)
10334 			return;
10335 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10336 			hdr_only = 1;
10337 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10338 		if (filter & IPW_PROM_NO_CTL)
10339 			return;
10340 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10341 			hdr_only = 1;
10342 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10343 		if (filter & IPW_PROM_NO_DATA)
10344 			return;
10345 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10346 			hdr_only = 1;
10347 	}
10348 
10349 	for(n=0; n<txb->nr_frags; ++n) {
10350 		struct sk_buff *src = txb->fragments[n];
10351 		struct sk_buff *dst;
10352 		struct ieee80211_radiotap_header *rt_hdr;
10353 		int len;
10354 
10355 		if (hdr_only) {
10356 			hdr = (void *)src->data;
10357 			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10358 		} else
10359 			len = src->len;
10360 
10361 		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10362 		if (!dst)
10363 			continue;
10364 
10365 		rt_hdr = skb_put(dst, sizeof(*rt_hdr));
10366 
10367 		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10368 		rt_hdr->it_pad = 0;
10369 		rt_hdr->it_present = 0; /* after all, it's just an idea */
10370 		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10371 
10372 		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10373 			ieee80211chan2mhz(priv->channel));
10374 		if (priv->channel > 14) 	/* 802.11a */
10375 			*(__le16*)skb_put(dst, sizeof(u16)) =
10376 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10377 					     IEEE80211_CHAN_5GHZ);
10378 		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10379 			*(__le16*)skb_put(dst, sizeof(u16)) =
10380 				cpu_to_le16(IEEE80211_CHAN_CCK |
10381 					     IEEE80211_CHAN_2GHZ);
10382 		else 		/* 802.11g */
10383 			*(__le16*)skb_put(dst, sizeof(u16)) =
10384 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10385 				 IEEE80211_CHAN_2GHZ);
10386 
10387 		rt_hdr->it_len = cpu_to_le16(dst->len);
10388 
10389 		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10390 
10391 		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10392 			dev_kfree_skb_any(dst);
10393 	}
10394 }
10395 #endif
10396 
10397 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10398 					   struct net_device *dev, int pri)
10399 {
10400 	struct ipw_priv *priv = libipw_priv(dev);
10401 	unsigned long flags;
10402 	netdev_tx_t ret;
10403 
10404 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10405 	spin_lock_irqsave(&priv->lock, flags);
10406 
10407 #ifdef CONFIG_IPW2200_PROMISCUOUS
10408 	if (rtap_iface && netif_running(priv->prom_net_dev))
10409 		ipw_handle_promiscuous_tx(priv, txb);
10410 #endif
10411 
10412 	ret = ipw_tx_skb(priv, txb, pri);
10413 	if (ret == NETDEV_TX_OK)
10414 		__ipw_led_activity_on(priv);
10415 	spin_unlock_irqrestore(&priv->lock, flags);
10416 
10417 	return ret;
10418 }
10419 
10420 static void ipw_net_set_multicast_list(struct net_device *dev)
10421 {
10422 
10423 }
10424 
10425 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10426 {
10427 	struct ipw_priv *priv = libipw_priv(dev);
10428 	struct sockaddr *addr = p;
10429 
10430 	if (!is_valid_ether_addr(addr->sa_data))
10431 		return -EADDRNOTAVAIL;
10432 	mutex_lock(&priv->mutex);
10433 	priv->config |= CFG_CUSTOM_MAC;
10434 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10435 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10436 	       priv->net_dev->name, priv->mac_addr);
10437 	schedule_work(&priv->adapter_restart);
10438 	mutex_unlock(&priv->mutex);
10439 	return 0;
10440 }
10441 
10442 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10443 				    struct ethtool_drvinfo *info)
10444 {
10445 	struct ipw_priv *p = libipw_priv(dev);
10446 	char vers[64];
10447 	char date[32];
10448 	u32 len;
10449 
10450 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10451 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10452 
10453 	len = sizeof(vers);
10454 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10455 	len = sizeof(date);
10456 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10457 
10458 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10459 		 vers, date);
10460 	strlcpy(info->bus_info, pci_name(p->pci_dev),
10461 		sizeof(info->bus_info));
10462 }
10463 
10464 static u32 ipw_ethtool_get_link(struct net_device *dev)
10465 {
10466 	struct ipw_priv *priv = libipw_priv(dev);
10467 	return (priv->status & STATUS_ASSOCIATED) != 0;
10468 }
10469 
10470 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10471 {
10472 	return IPW_EEPROM_IMAGE_SIZE;
10473 }
10474 
10475 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10476 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10477 {
10478 	struct ipw_priv *p = libipw_priv(dev);
10479 
10480 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10481 		return -EINVAL;
10482 	mutex_lock(&p->mutex);
10483 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10484 	mutex_unlock(&p->mutex);
10485 	return 0;
10486 }
10487 
10488 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10489 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10490 {
10491 	struct ipw_priv *p = libipw_priv(dev);
10492 	int i;
10493 
10494 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10495 		return -EINVAL;
10496 	mutex_lock(&p->mutex);
10497 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10498 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10499 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10500 	mutex_unlock(&p->mutex);
10501 	return 0;
10502 }
10503 
10504 static const struct ethtool_ops ipw_ethtool_ops = {
10505 	.get_link = ipw_ethtool_get_link,
10506 	.get_drvinfo = ipw_ethtool_get_drvinfo,
10507 	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10508 	.get_eeprom = ipw_ethtool_get_eeprom,
10509 	.set_eeprom = ipw_ethtool_set_eeprom,
10510 };
10511 
10512 static irqreturn_t ipw_isr(int irq, void *data)
10513 {
10514 	struct ipw_priv *priv = data;
10515 	u32 inta, inta_mask;
10516 
10517 	if (!priv)
10518 		return IRQ_NONE;
10519 
10520 	spin_lock(&priv->irq_lock);
10521 
10522 	if (!(priv->status & STATUS_INT_ENABLED)) {
10523 		/* IRQ is disabled */
10524 		goto none;
10525 	}
10526 
10527 	inta = ipw_read32(priv, IPW_INTA_RW);
10528 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10529 
10530 	if (inta == 0xFFFFFFFF) {
10531 		/* Hardware disappeared */
10532 		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10533 		goto none;
10534 	}
10535 
10536 	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10537 		/* Shared interrupt */
10538 		goto none;
10539 	}
10540 
10541 	/* tell the device to stop sending interrupts */
10542 	__ipw_disable_interrupts(priv);
10543 
10544 	/* ack current interrupts */
10545 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10546 	ipw_write32(priv, IPW_INTA_RW, inta);
10547 
10548 	/* Cache INTA value for our tasklet */
10549 	priv->isr_inta = inta;
10550 
10551 	tasklet_schedule(&priv->irq_tasklet);
10552 
10553 	spin_unlock(&priv->irq_lock);
10554 
10555 	return IRQ_HANDLED;
10556       none:
10557 	spin_unlock(&priv->irq_lock);
10558 	return IRQ_NONE;
10559 }
10560 
10561 static void ipw_rf_kill(void *adapter)
10562 {
10563 	struct ipw_priv *priv = adapter;
10564 	unsigned long flags;
10565 
10566 	spin_lock_irqsave(&priv->lock, flags);
10567 
10568 	if (rf_kill_active(priv)) {
10569 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10570 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10571 		goto exit_unlock;
10572 	}
10573 
10574 	/* RF Kill is now disabled, so bring the device back up */
10575 
10576 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10577 		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10578 				  "device\n");
10579 
10580 		/* we can not do an adapter restart while inside an irq lock */
10581 		schedule_work(&priv->adapter_restart);
10582 	} else
10583 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10584 				  "enabled\n");
10585 
10586       exit_unlock:
10587 	spin_unlock_irqrestore(&priv->lock, flags);
10588 }
10589 
10590 static void ipw_bg_rf_kill(struct work_struct *work)
10591 {
10592 	struct ipw_priv *priv =
10593 		container_of(work, struct ipw_priv, rf_kill.work);
10594 	mutex_lock(&priv->mutex);
10595 	ipw_rf_kill(priv);
10596 	mutex_unlock(&priv->mutex);
10597 }
10598 
10599 static void ipw_link_up(struct ipw_priv *priv)
10600 {
10601 	priv->last_seq_num = -1;
10602 	priv->last_frag_num = -1;
10603 	priv->last_packet_time = 0;
10604 
10605 	netif_carrier_on(priv->net_dev);
10606 
10607 	cancel_delayed_work(&priv->request_scan);
10608 	cancel_delayed_work(&priv->request_direct_scan);
10609 	cancel_delayed_work(&priv->request_passive_scan);
10610 	cancel_delayed_work(&priv->scan_event);
10611 	ipw_reset_stats(priv);
10612 	/* Ensure the rate is updated immediately */
10613 	priv->last_rate = ipw_get_current_rate(priv);
10614 	ipw_gather_stats(priv);
10615 	ipw_led_link_up(priv);
10616 	notify_wx_assoc_event(priv);
10617 
10618 	if (priv->config & CFG_BACKGROUND_SCAN)
10619 		schedule_delayed_work(&priv->request_scan, HZ);
10620 }
10621 
10622 static void ipw_bg_link_up(struct work_struct *work)
10623 {
10624 	struct ipw_priv *priv =
10625 		container_of(work, struct ipw_priv, link_up);
10626 	mutex_lock(&priv->mutex);
10627 	ipw_link_up(priv);
10628 	mutex_unlock(&priv->mutex);
10629 }
10630 
10631 static void ipw_link_down(struct ipw_priv *priv)
10632 {
10633 	ipw_led_link_down(priv);
10634 	netif_carrier_off(priv->net_dev);
10635 	notify_wx_assoc_event(priv);
10636 
10637 	/* Cancel any queued work ... */
10638 	cancel_delayed_work(&priv->request_scan);
10639 	cancel_delayed_work(&priv->request_direct_scan);
10640 	cancel_delayed_work(&priv->request_passive_scan);
10641 	cancel_delayed_work(&priv->adhoc_check);
10642 	cancel_delayed_work(&priv->gather_stats);
10643 
10644 	ipw_reset_stats(priv);
10645 
10646 	if (!(priv->status & STATUS_EXIT_PENDING)) {
10647 		/* Queue up another scan... */
10648 		schedule_delayed_work(&priv->request_scan, 0);
10649 	} else
10650 		cancel_delayed_work(&priv->scan_event);
10651 }
10652 
10653 static void ipw_bg_link_down(struct work_struct *work)
10654 {
10655 	struct ipw_priv *priv =
10656 		container_of(work, struct ipw_priv, link_down);
10657 	mutex_lock(&priv->mutex);
10658 	ipw_link_down(priv);
10659 	mutex_unlock(&priv->mutex);
10660 }
10661 
10662 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10663 {
10664 	int ret = 0;
10665 
10666 	init_waitqueue_head(&priv->wait_command_queue);
10667 	init_waitqueue_head(&priv->wait_state);
10668 
10669 	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10670 	INIT_WORK(&priv->associate, ipw_bg_associate);
10671 	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10672 	INIT_WORK(&priv->system_config, ipw_system_config);
10673 	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10674 	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10675 	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10676 	INIT_WORK(&priv->up, ipw_bg_up);
10677 	INIT_WORK(&priv->down, ipw_bg_down);
10678 	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10679 	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10680 	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10681 	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10682 	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10683 	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10684 	INIT_WORK(&priv->roam, ipw_bg_roam);
10685 	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10686 	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10687 	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10688 	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10689 	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10690 	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10691 	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10692 
10693 #ifdef CONFIG_IPW2200_QOS
10694 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10695 #endif				/* CONFIG_IPW2200_QOS */
10696 
10697 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10698 		     ipw_irq_tasklet, (unsigned long)priv);
10699 
10700 	return ret;
10701 }
10702 
10703 static void shim__set_security(struct net_device *dev,
10704 			       struct libipw_security *sec)
10705 {
10706 	struct ipw_priv *priv = libipw_priv(dev);
10707 	int i;
10708 	for (i = 0; i < 4; i++) {
10709 		if (sec->flags & (1 << i)) {
10710 			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10711 			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10712 			if (sec->key_sizes[i] == 0)
10713 				priv->ieee->sec.flags &= ~(1 << i);
10714 			else {
10715 				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10716 				       sec->key_sizes[i]);
10717 				priv->ieee->sec.flags |= (1 << i);
10718 			}
10719 			priv->status |= STATUS_SECURITY_UPDATED;
10720 		} else if (sec->level != SEC_LEVEL_1)
10721 			priv->ieee->sec.flags &= ~(1 << i);
10722 	}
10723 
10724 	if (sec->flags & SEC_ACTIVE_KEY) {
10725 		if (sec->active_key <= 3) {
10726 			priv->ieee->sec.active_key = sec->active_key;
10727 			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10728 		} else
10729 			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10730 		priv->status |= STATUS_SECURITY_UPDATED;
10731 	} else
10732 		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10733 
10734 	if ((sec->flags & SEC_AUTH_MODE) &&
10735 	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10736 		priv->ieee->sec.auth_mode = sec->auth_mode;
10737 		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10738 		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10739 			priv->capability |= CAP_SHARED_KEY;
10740 		else
10741 			priv->capability &= ~CAP_SHARED_KEY;
10742 		priv->status |= STATUS_SECURITY_UPDATED;
10743 	}
10744 
10745 	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10746 		priv->ieee->sec.flags |= SEC_ENABLED;
10747 		priv->ieee->sec.enabled = sec->enabled;
10748 		priv->status |= STATUS_SECURITY_UPDATED;
10749 		if (sec->enabled)
10750 			priv->capability |= CAP_PRIVACY_ON;
10751 		else
10752 			priv->capability &= ~CAP_PRIVACY_ON;
10753 	}
10754 
10755 	if (sec->flags & SEC_ENCRYPT)
10756 		priv->ieee->sec.encrypt = sec->encrypt;
10757 
10758 	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10759 		priv->ieee->sec.level = sec->level;
10760 		priv->ieee->sec.flags |= SEC_LEVEL;
10761 		priv->status |= STATUS_SECURITY_UPDATED;
10762 	}
10763 
10764 	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10765 		ipw_set_hwcrypto_keys(priv);
10766 
10767 	/* To match current functionality of ipw2100 (which works well w/
10768 	 * various supplicants, we don't force a disassociate if the
10769 	 * privacy capability changes ... */
10770 #if 0
10771 	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10772 	    (((priv->assoc_request.capability &
10773 	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10774 	     (!(priv->assoc_request.capability &
10775 		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10776 		IPW_DEBUG_ASSOC("Disassociating due to capability "
10777 				"change.\n");
10778 		ipw_disassociate(priv);
10779 	}
10780 #endif
10781 }
10782 
10783 static int init_supported_rates(struct ipw_priv *priv,
10784 				struct ipw_supported_rates *rates)
10785 {
10786 	/* TODO: Mask out rates based on priv->rates_mask */
10787 
10788 	memset(rates, 0, sizeof(*rates));
10789 	/* configure supported rates */
10790 	switch (priv->ieee->freq_band) {
10791 	case LIBIPW_52GHZ_BAND:
10792 		rates->ieee_mode = IPW_A_MODE;
10793 		rates->purpose = IPW_RATE_CAPABILITIES;
10794 		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10795 					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10796 		break;
10797 
10798 	default:		/* Mixed or 2.4Ghz */
10799 		rates->ieee_mode = IPW_G_MODE;
10800 		rates->purpose = IPW_RATE_CAPABILITIES;
10801 		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10802 				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10803 		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10804 			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10805 						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10806 		}
10807 		break;
10808 	}
10809 
10810 	return 0;
10811 }
10812 
10813 static int ipw_config(struct ipw_priv *priv)
10814 {
10815 	/* This is only called from ipw_up, which resets/reloads the firmware
10816 	   so, we don't need to first disable the card before we configure
10817 	   it */
10818 	if (ipw_set_tx_power(priv))
10819 		goto error;
10820 
10821 	/* initialize adapter address */
10822 	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10823 		goto error;
10824 
10825 	/* set basic system config settings */
10826 	init_sys_config(&priv->sys_config);
10827 
10828 	/* Support Bluetooth if we have BT h/w on board, and user wants to.
10829 	 * Does not support BT priority yet (don't abort or defer our Tx) */
10830 	if (bt_coexist) {
10831 		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10832 
10833 		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10834 			priv->sys_config.bt_coexistence
10835 			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10836 		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10837 			priv->sys_config.bt_coexistence
10838 			    |= CFG_BT_COEXISTENCE_OOB;
10839 	}
10840 
10841 #ifdef CONFIG_IPW2200_PROMISCUOUS
10842 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10843 		priv->sys_config.accept_all_data_frames = 1;
10844 		priv->sys_config.accept_non_directed_frames = 1;
10845 		priv->sys_config.accept_all_mgmt_bcpr = 1;
10846 		priv->sys_config.accept_all_mgmt_frames = 1;
10847 	}
10848 #endif
10849 
10850 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10851 		priv->sys_config.answer_broadcast_ssid_probe = 1;
10852 	else
10853 		priv->sys_config.answer_broadcast_ssid_probe = 0;
10854 
10855 	if (ipw_send_system_config(priv))
10856 		goto error;
10857 
10858 	init_supported_rates(priv, &priv->rates);
10859 	if (ipw_send_supported_rates(priv, &priv->rates))
10860 		goto error;
10861 
10862 	/* Set request-to-send threshold */
10863 	if (priv->rts_threshold) {
10864 		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10865 			goto error;
10866 	}
10867 #ifdef CONFIG_IPW2200_QOS
10868 	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10869 	ipw_qos_activate(priv, NULL);
10870 #endif				/* CONFIG_IPW2200_QOS */
10871 
10872 	if (ipw_set_random_seed(priv))
10873 		goto error;
10874 
10875 	/* final state transition to the RUN state */
10876 	if (ipw_send_host_complete(priv))
10877 		goto error;
10878 
10879 	priv->status |= STATUS_INIT;
10880 
10881 	ipw_led_init(priv);
10882 	ipw_led_radio_on(priv);
10883 	priv->notif_missed_beacons = 0;
10884 
10885 	/* Set hardware WEP key if it is configured. */
10886 	if ((priv->capability & CAP_PRIVACY_ON) &&
10887 	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10888 	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10889 		ipw_set_hwcrypto_keys(priv);
10890 
10891 	return 0;
10892 
10893       error:
10894 	return -EIO;
10895 }
10896 
10897 /*
10898  * NOTE:
10899  *
10900  * These tables have been tested in conjunction with the
10901  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10902  *
10903  * Altering this values, using it on other hardware, or in geographies
10904  * not intended for resale of the above mentioned Intel adapters has
10905  * not been tested.
10906  *
10907  * Remember to update the table in README.ipw2200 when changing this
10908  * table.
10909  *
10910  */
10911 static const struct libipw_geo ipw_geos[] = {
10912 	{			/* Restricted */
10913 	 "---",
10914 	 .bg_channels = 11,
10915 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10916 		{2427, 4}, {2432, 5}, {2437, 6},
10917 		{2442, 7}, {2447, 8}, {2452, 9},
10918 		{2457, 10}, {2462, 11}},
10919 	 },
10920 
10921 	{			/* Custom US/Canada */
10922 	 "ZZF",
10923 	 .bg_channels = 11,
10924 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10925 		{2427, 4}, {2432, 5}, {2437, 6},
10926 		{2442, 7}, {2447, 8}, {2452, 9},
10927 		{2457, 10}, {2462, 11}},
10928 	 .a_channels = 8,
10929 	 .a = {{5180, 36},
10930 	       {5200, 40},
10931 	       {5220, 44},
10932 	       {5240, 48},
10933 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10934 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10935 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10936 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10937 	 },
10938 
10939 	{			/* Rest of World */
10940 	 "ZZD",
10941 	 .bg_channels = 13,
10942 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10943 		{2427, 4}, {2432, 5}, {2437, 6},
10944 		{2442, 7}, {2447, 8}, {2452, 9},
10945 		{2457, 10}, {2462, 11}, {2467, 12},
10946 		{2472, 13}},
10947 	 },
10948 
10949 	{			/* Custom USA & Europe & High */
10950 	 "ZZA",
10951 	 .bg_channels = 11,
10952 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10953 		{2427, 4}, {2432, 5}, {2437, 6},
10954 		{2442, 7}, {2447, 8}, {2452, 9},
10955 		{2457, 10}, {2462, 11}},
10956 	 .a_channels = 13,
10957 	 .a = {{5180, 36},
10958 	       {5200, 40},
10959 	       {5220, 44},
10960 	       {5240, 48},
10961 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10962 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10963 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10964 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10965 	       {5745, 149},
10966 	       {5765, 153},
10967 	       {5785, 157},
10968 	       {5805, 161},
10969 	       {5825, 165}},
10970 	 },
10971 
10972 	{			/* Custom NA & Europe */
10973 	 "ZZB",
10974 	 .bg_channels = 11,
10975 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10976 		{2427, 4}, {2432, 5}, {2437, 6},
10977 		{2442, 7}, {2447, 8}, {2452, 9},
10978 		{2457, 10}, {2462, 11}},
10979 	 .a_channels = 13,
10980 	 .a = {{5180, 36},
10981 	       {5200, 40},
10982 	       {5220, 44},
10983 	       {5240, 48},
10984 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10985 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10986 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10987 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10988 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
10989 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
10990 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
10991 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
10992 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
10993 	 },
10994 
10995 	{			/* Custom Japan */
10996 	 "ZZC",
10997 	 .bg_channels = 11,
10998 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10999 		{2427, 4}, {2432, 5}, {2437, 6},
11000 		{2442, 7}, {2447, 8}, {2452, 9},
11001 		{2457, 10}, {2462, 11}},
11002 	 .a_channels = 4,
11003 	 .a = {{5170, 34}, {5190, 38},
11004 	       {5210, 42}, {5230, 46}},
11005 	 },
11006 
11007 	{			/* Custom */
11008 	 "ZZM",
11009 	 .bg_channels = 11,
11010 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11011 		{2427, 4}, {2432, 5}, {2437, 6},
11012 		{2442, 7}, {2447, 8}, {2452, 9},
11013 		{2457, 10}, {2462, 11}},
11014 	 },
11015 
11016 	{			/* Europe */
11017 	 "ZZE",
11018 	 .bg_channels = 13,
11019 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11020 		{2427, 4}, {2432, 5}, {2437, 6},
11021 		{2442, 7}, {2447, 8}, {2452, 9},
11022 		{2457, 10}, {2462, 11}, {2467, 12},
11023 		{2472, 13}},
11024 	 .a_channels = 19,
11025 	 .a = {{5180, 36},
11026 	       {5200, 40},
11027 	       {5220, 44},
11028 	       {5240, 48},
11029 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11030 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11031 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11032 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11033 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11034 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11035 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11036 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11037 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11038 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11039 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11040 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11041 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11042 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11043 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11044 	 },
11045 
11046 	{			/* Custom Japan */
11047 	 "ZZJ",
11048 	 .bg_channels = 14,
11049 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11050 		{2427, 4}, {2432, 5}, {2437, 6},
11051 		{2442, 7}, {2447, 8}, {2452, 9},
11052 		{2457, 10}, {2462, 11}, {2467, 12},
11053 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11054 	 .a_channels = 4,
11055 	 .a = {{5170, 34}, {5190, 38},
11056 	       {5210, 42}, {5230, 46}},
11057 	 },
11058 
11059 	{			/* Rest of World */
11060 	 "ZZR",
11061 	 .bg_channels = 14,
11062 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11063 		{2427, 4}, {2432, 5}, {2437, 6},
11064 		{2442, 7}, {2447, 8}, {2452, 9},
11065 		{2457, 10}, {2462, 11}, {2467, 12},
11066 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11067 			     LIBIPW_CH_PASSIVE_ONLY}},
11068 	 },
11069 
11070 	{			/* High Band */
11071 	 "ZZH",
11072 	 .bg_channels = 13,
11073 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11074 		{2427, 4}, {2432, 5}, {2437, 6},
11075 		{2442, 7}, {2447, 8}, {2452, 9},
11076 		{2457, 10}, {2462, 11},
11077 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11078 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11079 	 .a_channels = 4,
11080 	 .a = {{5745, 149}, {5765, 153},
11081 	       {5785, 157}, {5805, 161}},
11082 	 },
11083 
11084 	{			/* Custom Europe */
11085 	 "ZZG",
11086 	 .bg_channels = 13,
11087 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11088 		{2427, 4}, {2432, 5}, {2437, 6},
11089 		{2442, 7}, {2447, 8}, {2452, 9},
11090 		{2457, 10}, {2462, 11},
11091 		{2467, 12}, {2472, 13}},
11092 	 .a_channels = 4,
11093 	 .a = {{5180, 36}, {5200, 40},
11094 	       {5220, 44}, {5240, 48}},
11095 	 },
11096 
11097 	{			/* Europe */
11098 	 "ZZK",
11099 	 .bg_channels = 13,
11100 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11101 		{2427, 4}, {2432, 5}, {2437, 6},
11102 		{2442, 7}, {2447, 8}, {2452, 9},
11103 		{2457, 10}, {2462, 11},
11104 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11105 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11106 	 .a_channels = 24,
11107 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11108 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11109 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11110 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11111 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11112 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11113 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11114 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11115 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11116 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11117 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11118 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11119 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11120 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11121 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11122 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11123 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11124 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11125 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11126 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11127 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11128 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11129 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11130 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11131 	 },
11132 
11133 	{			/* Europe */
11134 	 "ZZL",
11135 	 .bg_channels = 11,
11136 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11137 		{2427, 4}, {2432, 5}, {2437, 6},
11138 		{2442, 7}, {2447, 8}, {2452, 9},
11139 		{2457, 10}, {2462, 11}},
11140 	 .a_channels = 13,
11141 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11142 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11143 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11144 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11145 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11146 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11147 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11148 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11149 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11150 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11151 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11152 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11153 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11154 	 }
11155 };
11156 
11157 static void ipw_set_geo(struct ipw_priv *priv)
11158 {
11159 	int j;
11160 
11161 	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11162 		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11163 			    ipw_geos[j].name, 3))
11164 			break;
11165 	}
11166 
11167 	if (j == ARRAY_SIZE(ipw_geos)) {
11168 		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11169 			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11170 			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11171 			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11172 		j = 0;
11173 	}
11174 
11175 	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11176 }
11177 
11178 #define MAX_HW_RESTARTS 5
11179 static int ipw_up(struct ipw_priv *priv)
11180 {
11181 	int rc, i;
11182 
11183 	/* Age scan list entries found before suspend */
11184 	if (priv->suspend_time) {
11185 		libipw_networks_age(priv->ieee, priv->suspend_time);
11186 		priv->suspend_time = 0;
11187 	}
11188 
11189 	if (priv->status & STATUS_EXIT_PENDING)
11190 		return -EIO;
11191 
11192 	if (cmdlog && !priv->cmdlog) {
11193 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11194 				       GFP_KERNEL);
11195 		if (priv->cmdlog == NULL) {
11196 			IPW_ERROR("Error allocating %d command log entries.\n",
11197 				  cmdlog);
11198 			return -ENOMEM;
11199 		} else {
11200 			priv->cmdlog_len = cmdlog;
11201 		}
11202 	}
11203 
11204 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11205 		/* Load the microcode, firmware, and eeprom.
11206 		 * Also start the clocks. */
11207 		rc = ipw_load(priv);
11208 		if (rc) {
11209 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11210 			return rc;
11211 		}
11212 
11213 		ipw_init_ordinals(priv);
11214 		if (!(priv->config & CFG_CUSTOM_MAC))
11215 			eeprom_parse_mac(priv, priv->mac_addr);
11216 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11217 
11218 		ipw_set_geo(priv);
11219 
11220 		if (priv->status & STATUS_RF_KILL_SW) {
11221 			IPW_WARNING("Radio disabled by module parameter.\n");
11222 			return 0;
11223 		} else if (rf_kill_active(priv)) {
11224 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11225 				    "Kill switch must be turned off for "
11226 				    "wireless networking to work.\n");
11227 			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11228 			return 0;
11229 		}
11230 
11231 		rc = ipw_config(priv);
11232 		if (!rc) {
11233 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11234 
11235 			/* If configure to try and auto-associate, kick
11236 			 * off a scan. */
11237 			schedule_delayed_work(&priv->request_scan, 0);
11238 
11239 			return 0;
11240 		}
11241 
11242 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11243 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11244 			       i, MAX_HW_RESTARTS);
11245 
11246 		/* We had an error bringing up the hardware, so take it
11247 		 * all the way back down so we can try again */
11248 		ipw_down(priv);
11249 	}
11250 
11251 	/* tried to restart and config the device for as long as our
11252 	 * patience could withstand */
11253 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11254 
11255 	return -EIO;
11256 }
11257 
11258 static void ipw_bg_up(struct work_struct *work)
11259 {
11260 	struct ipw_priv *priv =
11261 		container_of(work, struct ipw_priv, up);
11262 	mutex_lock(&priv->mutex);
11263 	ipw_up(priv);
11264 	mutex_unlock(&priv->mutex);
11265 }
11266 
11267 static void ipw_deinit(struct ipw_priv *priv)
11268 {
11269 	int i;
11270 
11271 	if (priv->status & STATUS_SCANNING) {
11272 		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11273 		ipw_abort_scan(priv);
11274 	}
11275 
11276 	if (priv->status & STATUS_ASSOCIATED) {
11277 		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11278 		ipw_disassociate(priv);
11279 	}
11280 
11281 	ipw_led_shutdown(priv);
11282 
11283 	/* Wait up to 1s for status to change to not scanning and not
11284 	 * associated (disassociation can take a while for a ful 802.11
11285 	 * exchange */
11286 	for (i = 1000; i && (priv->status &
11287 			     (STATUS_DISASSOCIATING |
11288 			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11289 		udelay(10);
11290 
11291 	if (priv->status & (STATUS_DISASSOCIATING |
11292 			    STATUS_ASSOCIATED | STATUS_SCANNING))
11293 		IPW_DEBUG_INFO("Still associated or scanning...\n");
11294 	else
11295 		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11296 
11297 	/* Attempt to disable the card */
11298 	ipw_send_card_disable(priv, 0);
11299 
11300 	priv->status &= ~STATUS_INIT;
11301 }
11302 
11303 static void ipw_down(struct ipw_priv *priv)
11304 {
11305 	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11306 
11307 	priv->status |= STATUS_EXIT_PENDING;
11308 
11309 	if (ipw_is_init(priv))
11310 		ipw_deinit(priv);
11311 
11312 	/* Wipe out the EXIT_PENDING status bit if we are not actually
11313 	 * exiting the module */
11314 	if (!exit_pending)
11315 		priv->status &= ~STATUS_EXIT_PENDING;
11316 
11317 	/* tell the device to stop sending interrupts */
11318 	ipw_disable_interrupts(priv);
11319 
11320 	/* Clear all bits but the RF Kill */
11321 	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11322 	netif_carrier_off(priv->net_dev);
11323 
11324 	ipw_stop_nic(priv);
11325 
11326 	ipw_led_radio_off(priv);
11327 }
11328 
11329 static void ipw_bg_down(struct work_struct *work)
11330 {
11331 	struct ipw_priv *priv =
11332 		container_of(work, struct ipw_priv, down);
11333 	mutex_lock(&priv->mutex);
11334 	ipw_down(priv);
11335 	mutex_unlock(&priv->mutex);
11336 }
11337 
11338 static int ipw_wdev_init(struct net_device *dev)
11339 {
11340 	int i, rc = 0;
11341 	struct ipw_priv *priv = libipw_priv(dev);
11342 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11343 	struct wireless_dev *wdev = &priv->ieee->wdev;
11344 
11345 	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11346 
11347 	/* fill-out priv->ieee->bg_band */
11348 	if (geo->bg_channels) {
11349 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11350 
11351 		bg_band->band = NL80211_BAND_2GHZ;
11352 		bg_band->n_channels = geo->bg_channels;
11353 		bg_band->channels = kcalloc(geo->bg_channels,
11354 					    sizeof(struct ieee80211_channel),
11355 					    GFP_KERNEL);
11356 		if (!bg_band->channels) {
11357 			rc = -ENOMEM;
11358 			goto out;
11359 		}
11360 		/* translate geo->bg to bg_band.channels */
11361 		for (i = 0; i < geo->bg_channels; i++) {
11362 			bg_band->channels[i].band = NL80211_BAND_2GHZ;
11363 			bg_band->channels[i].center_freq = geo->bg[i].freq;
11364 			bg_band->channels[i].hw_value = geo->bg[i].channel;
11365 			bg_band->channels[i].max_power = geo->bg[i].max_power;
11366 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11367 				bg_band->channels[i].flags |=
11368 					IEEE80211_CHAN_NO_IR;
11369 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11370 				bg_band->channels[i].flags |=
11371 					IEEE80211_CHAN_NO_IR;
11372 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11373 				bg_band->channels[i].flags |=
11374 					IEEE80211_CHAN_RADAR;
11375 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11376 			   LIBIPW_CH_UNIFORM_SPREADING, or
11377 			   LIBIPW_CH_B_ONLY... */
11378 		}
11379 		/* point at bitrate info */
11380 		bg_band->bitrates = ipw2200_bg_rates;
11381 		bg_band->n_bitrates = ipw2200_num_bg_rates;
11382 
11383 		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11384 	}
11385 
11386 	/* fill-out priv->ieee->a_band */
11387 	if (geo->a_channels) {
11388 		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11389 
11390 		a_band->band = NL80211_BAND_5GHZ;
11391 		a_band->n_channels = geo->a_channels;
11392 		a_band->channels = kcalloc(geo->a_channels,
11393 					   sizeof(struct ieee80211_channel),
11394 					   GFP_KERNEL);
11395 		if (!a_band->channels) {
11396 			rc = -ENOMEM;
11397 			goto out;
11398 		}
11399 		/* translate geo->a to a_band.channels */
11400 		for (i = 0; i < geo->a_channels; i++) {
11401 			a_band->channels[i].band = NL80211_BAND_5GHZ;
11402 			a_band->channels[i].center_freq = geo->a[i].freq;
11403 			a_band->channels[i].hw_value = geo->a[i].channel;
11404 			a_band->channels[i].max_power = geo->a[i].max_power;
11405 			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11406 				a_band->channels[i].flags |=
11407 					IEEE80211_CHAN_NO_IR;
11408 			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11409 				a_band->channels[i].flags |=
11410 					IEEE80211_CHAN_NO_IR;
11411 			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11412 				a_band->channels[i].flags |=
11413 					IEEE80211_CHAN_RADAR;
11414 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11415 			   LIBIPW_CH_UNIFORM_SPREADING, or
11416 			   LIBIPW_CH_B_ONLY... */
11417 		}
11418 		/* point at bitrate info */
11419 		a_band->bitrates = ipw2200_a_rates;
11420 		a_band->n_bitrates = ipw2200_num_a_rates;
11421 
11422 		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11423 	}
11424 
11425 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
11426 	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11427 
11428 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11429 
11430 	/* With that information in place, we can now register the wiphy... */
11431 	if (wiphy_register(wdev->wiphy))
11432 		rc = -EIO;
11433 out:
11434 	return rc;
11435 }
11436 
11437 /* PCI driver stuff */
11438 static const struct pci_device_id card_ids[] = {
11439 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11440 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11441 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11442 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11443 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11444 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11445 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11446 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11447 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11448 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11449 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11450 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11451 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11452 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11453 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11454 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11455 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11456 	{PCI_VDEVICE(INTEL, 0x104f), 0},
11457 	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11458 	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11459 	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11460 	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11461 
11462 	/* required last entry */
11463 	{0,}
11464 };
11465 
11466 MODULE_DEVICE_TABLE(pci, card_ids);
11467 
11468 static struct attribute *ipw_sysfs_entries[] = {
11469 	&dev_attr_rf_kill.attr,
11470 	&dev_attr_direct_dword.attr,
11471 	&dev_attr_indirect_byte.attr,
11472 	&dev_attr_indirect_dword.attr,
11473 	&dev_attr_mem_gpio_reg.attr,
11474 	&dev_attr_command_event_reg.attr,
11475 	&dev_attr_nic_type.attr,
11476 	&dev_attr_status.attr,
11477 	&dev_attr_cfg.attr,
11478 	&dev_attr_error.attr,
11479 	&dev_attr_event_log.attr,
11480 	&dev_attr_cmd_log.attr,
11481 	&dev_attr_eeprom_delay.attr,
11482 	&dev_attr_ucode_version.attr,
11483 	&dev_attr_rtc.attr,
11484 	&dev_attr_scan_age.attr,
11485 	&dev_attr_led.attr,
11486 	&dev_attr_speed_scan.attr,
11487 	&dev_attr_net_stats.attr,
11488 	&dev_attr_channels.attr,
11489 #ifdef CONFIG_IPW2200_PROMISCUOUS
11490 	&dev_attr_rtap_iface.attr,
11491 	&dev_attr_rtap_filter.attr,
11492 #endif
11493 	NULL
11494 };
11495 
11496 static const struct attribute_group ipw_attribute_group = {
11497 	.name = NULL,		/* put in device directory */
11498 	.attrs = ipw_sysfs_entries,
11499 };
11500 
11501 #ifdef CONFIG_IPW2200_PROMISCUOUS
11502 static int ipw_prom_open(struct net_device *dev)
11503 {
11504 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11505 	struct ipw_priv *priv = prom_priv->priv;
11506 
11507 	IPW_DEBUG_INFO("prom dev->open\n");
11508 	netif_carrier_off(dev);
11509 
11510 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11511 		priv->sys_config.accept_all_data_frames = 1;
11512 		priv->sys_config.accept_non_directed_frames = 1;
11513 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11514 		priv->sys_config.accept_all_mgmt_frames = 1;
11515 
11516 		ipw_send_system_config(priv);
11517 	}
11518 
11519 	return 0;
11520 }
11521 
11522 static int ipw_prom_stop(struct net_device *dev)
11523 {
11524 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11525 	struct ipw_priv *priv = prom_priv->priv;
11526 
11527 	IPW_DEBUG_INFO("prom dev->stop\n");
11528 
11529 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11530 		priv->sys_config.accept_all_data_frames = 0;
11531 		priv->sys_config.accept_non_directed_frames = 0;
11532 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11533 		priv->sys_config.accept_all_mgmt_frames = 0;
11534 
11535 		ipw_send_system_config(priv);
11536 	}
11537 
11538 	return 0;
11539 }
11540 
11541 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11542 					    struct net_device *dev)
11543 {
11544 	IPW_DEBUG_INFO("prom dev->xmit\n");
11545 	dev_kfree_skb(skb);
11546 	return NETDEV_TX_OK;
11547 }
11548 
11549 static const struct net_device_ops ipw_prom_netdev_ops = {
11550 	.ndo_open 		= ipw_prom_open,
11551 	.ndo_stop		= ipw_prom_stop,
11552 	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11553 	.ndo_set_mac_address 	= eth_mac_addr,
11554 	.ndo_validate_addr	= eth_validate_addr,
11555 };
11556 
11557 static int ipw_prom_alloc(struct ipw_priv *priv)
11558 {
11559 	int rc = 0;
11560 
11561 	if (priv->prom_net_dev)
11562 		return -EPERM;
11563 
11564 	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11565 	if (priv->prom_net_dev == NULL)
11566 		return -ENOMEM;
11567 
11568 	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11569 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11570 	priv->prom_priv->priv = priv;
11571 
11572 	strcpy(priv->prom_net_dev->name, "rtap%d");
11573 	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11574 
11575 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11576 	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11577 
11578 	priv->prom_net_dev->min_mtu = 68;
11579 	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11580 
11581 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11582 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11583 
11584 	rc = register_netdev(priv->prom_net_dev);
11585 	if (rc) {
11586 		free_libipw(priv->prom_net_dev, 1);
11587 		priv->prom_net_dev = NULL;
11588 		return rc;
11589 	}
11590 
11591 	return 0;
11592 }
11593 
11594 static void ipw_prom_free(struct ipw_priv *priv)
11595 {
11596 	if (!priv->prom_net_dev)
11597 		return;
11598 
11599 	unregister_netdev(priv->prom_net_dev);
11600 	free_libipw(priv->prom_net_dev, 1);
11601 
11602 	priv->prom_net_dev = NULL;
11603 }
11604 
11605 #endif
11606 
11607 static const struct net_device_ops ipw_netdev_ops = {
11608 	.ndo_open		= ipw_net_open,
11609 	.ndo_stop		= ipw_net_stop,
11610 	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
11611 	.ndo_set_mac_address	= ipw_net_set_mac_address,
11612 	.ndo_start_xmit		= libipw_xmit,
11613 	.ndo_validate_addr	= eth_validate_addr,
11614 };
11615 
11616 static int ipw_pci_probe(struct pci_dev *pdev,
11617 				   const struct pci_device_id *ent)
11618 {
11619 	int err = 0;
11620 	struct net_device *net_dev;
11621 	void __iomem *base;
11622 	u32 length, val;
11623 	struct ipw_priv *priv;
11624 	int i;
11625 
11626 	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11627 	if (net_dev == NULL) {
11628 		err = -ENOMEM;
11629 		goto out;
11630 	}
11631 
11632 	priv = libipw_priv(net_dev);
11633 	priv->ieee = netdev_priv(net_dev);
11634 
11635 	priv->net_dev = net_dev;
11636 	priv->pci_dev = pdev;
11637 	ipw_debug_level = debug;
11638 	spin_lock_init(&priv->irq_lock);
11639 	spin_lock_init(&priv->lock);
11640 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11641 		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11642 
11643 	mutex_init(&priv->mutex);
11644 	if (pci_enable_device(pdev)) {
11645 		err = -ENODEV;
11646 		goto out_free_libipw;
11647 	}
11648 
11649 	pci_set_master(pdev);
11650 
11651 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11652 	if (!err)
11653 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11654 	if (err) {
11655 		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11656 		goto out_pci_disable_device;
11657 	}
11658 
11659 	pci_set_drvdata(pdev, priv);
11660 
11661 	err = pci_request_regions(pdev, DRV_NAME);
11662 	if (err)
11663 		goto out_pci_disable_device;
11664 
11665 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11666 	 * PCI Tx retries from interfering with C3 CPU state */
11667 	pci_read_config_dword(pdev, 0x40, &val);
11668 	if ((val & 0x0000ff00) != 0)
11669 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11670 
11671 	length = pci_resource_len(pdev, 0);
11672 	priv->hw_len = length;
11673 
11674 	base = pci_ioremap_bar(pdev, 0);
11675 	if (!base) {
11676 		err = -ENODEV;
11677 		goto out_pci_release_regions;
11678 	}
11679 
11680 	priv->hw_base = base;
11681 	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11682 	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11683 
11684 	err = ipw_setup_deferred_work(priv);
11685 	if (err) {
11686 		IPW_ERROR("Unable to setup deferred work\n");
11687 		goto out_iounmap;
11688 	}
11689 
11690 	ipw_sw_reset(priv, 1);
11691 
11692 	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11693 	if (err) {
11694 		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11695 		goto out_iounmap;
11696 	}
11697 
11698 	SET_NETDEV_DEV(net_dev, &pdev->dev);
11699 
11700 	mutex_lock(&priv->mutex);
11701 
11702 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11703 	priv->ieee->set_security = shim__set_security;
11704 	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11705 
11706 #ifdef CONFIG_IPW2200_QOS
11707 	priv->ieee->is_qos_active = ipw_is_qos_active;
11708 	priv->ieee->handle_probe_response = ipw_handle_beacon;
11709 	priv->ieee->handle_beacon = ipw_handle_probe_response;
11710 	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11711 #endif				/* CONFIG_IPW2200_QOS */
11712 
11713 	priv->ieee->perfect_rssi = -20;
11714 	priv->ieee->worst_rssi = -85;
11715 
11716 	net_dev->netdev_ops = &ipw_netdev_ops;
11717 	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11718 	net_dev->wireless_data = &priv->wireless_data;
11719 	net_dev->wireless_handlers = &ipw_wx_handler_def;
11720 	net_dev->ethtool_ops = &ipw_ethtool_ops;
11721 
11722 	net_dev->min_mtu = 68;
11723 	net_dev->max_mtu = LIBIPW_DATA_LEN;
11724 
11725 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11726 	if (err) {
11727 		IPW_ERROR("failed to create sysfs device attributes\n");
11728 		mutex_unlock(&priv->mutex);
11729 		goto out_release_irq;
11730 	}
11731 
11732 	if (ipw_up(priv)) {
11733 		mutex_unlock(&priv->mutex);
11734 		err = -EIO;
11735 		goto out_remove_sysfs;
11736 	}
11737 
11738 	mutex_unlock(&priv->mutex);
11739 
11740 	err = ipw_wdev_init(net_dev);
11741 	if (err) {
11742 		IPW_ERROR("failed to register wireless device\n");
11743 		goto out_remove_sysfs;
11744 	}
11745 
11746 	err = register_netdev(net_dev);
11747 	if (err) {
11748 		IPW_ERROR("failed to register network device\n");
11749 		goto out_unregister_wiphy;
11750 	}
11751 
11752 #ifdef CONFIG_IPW2200_PROMISCUOUS
11753 	if (rtap_iface) {
11754 	        err = ipw_prom_alloc(priv);
11755 		if (err) {
11756 			IPW_ERROR("Failed to register promiscuous network "
11757 				  "device (error %d).\n", err);
11758 			unregister_netdev(priv->net_dev);
11759 			goto out_unregister_wiphy;
11760 		}
11761 	}
11762 #endif
11763 
11764 	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11765 	       "channels, %d 802.11a channels)\n",
11766 	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11767 	       priv->ieee->geo.a_channels);
11768 
11769 	return 0;
11770 
11771       out_unregister_wiphy:
11772 	wiphy_unregister(priv->ieee->wdev.wiphy);
11773 	kfree(priv->ieee->a_band.channels);
11774 	kfree(priv->ieee->bg_band.channels);
11775       out_remove_sysfs:
11776 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11777       out_release_irq:
11778 	free_irq(pdev->irq, priv);
11779       out_iounmap:
11780 	iounmap(priv->hw_base);
11781       out_pci_release_regions:
11782 	pci_release_regions(pdev);
11783       out_pci_disable_device:
11784 	pci_disable_device(pdev);
11785       out_free_libipw:
11786 	free_libipw(priv->net_dev, 0);
11787       out:
11788 	return err;
11789 }
11790 
11791 static void ipw_pci_remove(struct pci_dev *pdev)
11792 {
11793 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11794 	struct list_head *p, *q;
11795 	int i;
11796 
11797 	if (!priv)
11798 		return;
11799 
11800 	mutex_lock(&priv->mutex);
11801 
11802 	priv->status |= STATUS_EXIT_PENDING;
11803 	ipw_down(priv);
11804 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11805 
11806 	mutex_unlock(&priv->mutex);
11807 
11808 	unregister_netdev(priv->net_dev);
11809 
11810 	if (priv->rxq) {
11811 		ipw_rx_queue_free(priv, priv->rxq);
11812 		priv->rxq = NULL;
11813 	}
11814 	ipw_tx_queue_free(priv);
11815 
11816 	if (priv->cmdlog) {
11817 		kfree(priv->cmdlog);
11818 		priv->cmdlog = NULL;
11819 	}
11820 
11821 	/* make sure all works are inactive */
11822 	cancel_delayed_work_sync(&priv->adhoc_check);
11823 	cancel_work_sync(&priv->associate);
11824 	cancel_work_sync(&priv->disassociate);
11825 	cancel_work_sync(&priv->system_config);
11826 	cancel_work_sync(&priv->rx_replenish);
11827 	cancel_work_sync(&priv->adapter_restart);
11828 	cancel_delayed_work_sync(&priv->rf_kill);
11829 	cancel_work_sync(&priv->up);
11830 	cancel_work_sync(&priv->down);
11831 	cancel_delayed_work_sync(&priv->request_scan);
11832 	cancel_delayed_work_sync(&priv->request_direct_scan);
11833 	cancel_delayed_work_sync(&priv->request_passive_scan);
11834 	cancel_delayed_work_sync(&priv->scan_event);
11835 	cancel_delayed_work_sync(&priv->gather_stats);
11836 	cancel_work_sync(&priv->abort_scan);
11837 	cancel_work_sync(&priv->roam);
11838 	cancel_delayed_work_sync(&priv->scan_check);
11839 	cancel_work_sync(&priv->link_up);
11840 	cancel_work_sync(&priv->link_down);
11841 	cancel_delayed_work_sync(&priv->led_link_on);
11842 	cancel_delayed_work_sync(&priv->led_link_off);
11843 	cancel_delayed_work_sync(&priv->led_act_off);
11844 	cancel_work_sync(&priv->merge_networks);
11845 
11846 	/* Free MAC hash list for ADHOC */
11847 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11848 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11849 			list_del(p);
11850 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11851 		}
11852 	}
11853 
11854 	kfree(priv->error);
11855 	priv->error = NULL;
11856 
11857 #ifdef CONFIG_IPW2200_PROMISCUOUS
11858 	ipw_prom_free(priv);
11859 #endif
11860 
11861 	free_irq(pdev->irq, priv);
11862 	iounmap(priv->hw_base);
11863 	pci_release_regions(pdev);
11864 	pci_disable_device(pdev);
11865 	/* wiphy_unregister needs to be here, before free_libipw */
11866 	wiphy_unregister(priv->ieee->wdev.wiphy);
11867 	kfree(priv->ieee->a_band.channels);
11868 	kfree(priv->ieee->bg_band.channels);
11869 	free_libipw(priv->net_dev, 0);
11870 	free_firmware();
11871 }
11872 
11873 #ifdef CONFIG_PM
11874 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11875 {
11876 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11877 	struct net_device *dev = priv->net_dev;
11878 
11879 	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11880 
11881 	/* Take down the device; powers it off, etc. */
11882 	ipw_down(priv);
11883 
11884 	/* Remove the PRESENT state of the device */
11885 	netif_device_detach(dev);
11886 
11887 	pci_save_state(pdev);
11888 	pci_disable_device(pdev);
11889 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11890 
11891 	priv->suspend_at = get_seconds();
11892 
11893 	return 0;
11894 }
11895 
11896 static int ipw_pci_resume(struct pci_dev *pdev)
11897 {
11898 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11899 	struct net_device *dev = priv->net_dev;
11900 	int err;
11901 	u32 val;
11902 
11903 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11904 
11905 	pci_set_power_state(pdev, PCI_D0);
11906 	err = pci_enable_device(pdev);
11907 	if (err) {
11908 		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11909 		       dev->name);
11910 		return err;
11911 	}
11912 	pci_restore_state(pdev);
11913 
11914 	/*
11915 	 * Suspend/Resume resets the PCI configuration space, so we have to
11916 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11917 	 * from interfering with C3 CPU state. pci_restore_state won't help
11918 	 * here since it only restores the first 64 bytes pci config header.
11919 	 */
11920 	pci_read_config_dword(pdev, 0x40, &val);
11921 	if ((val & 0x0000ff00) != 0)
11922 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11923 
11924 	/* Set the device back into the PRESENT state; this will also wake
11925 	 * the queue of needed */
11926 	netif_device_attach(dev);
11927 
11928 	priv->suspend_time = get_seconds() - priv->suspend_at;
11929 
11930 	/* Bring the device back up */
11931 	schedule_work(&priv->up);
11932 
11933 	return 0;
11934 }
11935 #endif
11936 
11937 static void ipw_pci_shutdown(struct pci_dev *pdev)
11938 {
11939 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11940 
11941 	/* Take down the device; powers it off, etc. */
11942 	ipw_down(priv);
11943 
11944 	pci_disable_device(pdev);
11945 }
11946 
11947 /* driver initialization stuff */
11948 static struct pci_driver ipw_driver = {
11949 	.name = DRV_NAME,
11950 	.id_table = card_ids,
11951 	.probe = ipw_pci_probe,
11952 	.remove = ipw_pci_remove,
11953 #ifdef CONFIG_PM
11954 	.suspend = ipw_pci_suspend,
11955 	.resume = ipw_pci_resume,
11956 #endif
11957 	.shutdown = ipw_pci_shutdown,
11958 };
11959 
11960 static int __init ipw_init(void)
11961 {
11962 	int ret;
11963 
11964 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11965 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11966 
11967 	ret = pci_register_driver(&ipw_driver);
11968 	if (ret) {
11969 		IPW_ERROR("Unable to initialize PCI module\n");
11970 		return ret;
11971 	}
11972 
11973 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11974 	if (ret) {
11975 		IPW_ERROR("Unable to create driver sysfs file\n");
11976 		pci_unregister_driver(&ipw_driver);
11977 		return ret;
11978 	}
11979 
11980 	return ret;
11981 }
11982 
11983 static void __exit ipw_exit(void)
11984 {
11985 	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11986 	pci_unregister_driver(&ipw_driver);
11987 }
11988 
11989 module_param(disable, int, 0444);
11990 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11991 
11992 module_param(associate, int, 0444);
11993 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11994 
11995 module_param(auto_create, int, 0444);
11996 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11997 
11998 module_param_named(led, led_support, int, 0444);
11999 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12000 
12001 module_param(debug, int, 0444);
12002 MODULE_PARM_DESC(debug, "debug output mask");
12003 
12004 module_param_named(channel, default_channel, int, 0444);
12005 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12006 
12007 #ifdef CONFIG_IPW2200_PROMISCUOUS
12008 module_param(rtap_iface, int, 0444);
12009 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12010 #endif
12011 
12012 #ifdef CONFIG_IPW2200_QOS
12013 module_param(qos_enable, int, 0444);
12014 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12015 
12016 module_param(qos_burst_enable, int, 0444);
12017 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12018 
12019 module_param(qos_no_ack_mask, int, 0444);
12020 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12021 
12022 module_param(burst_duration_CCK, int, 0444);
12023 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12024 
12025 module_param(burst_duration_OFDM, int, 0444);
12026 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12027 #endif				/* CONFIG_IPW2200_QOS */
12028 
12029 #ifdef CONFIG_IPW2200_MONITOR
12030 module_param_named(mode, network_mode, int, 0444);
12031 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12032 #else
12033 module_param_named(mode, network_mode, int, 0444);
12034 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12035 #endif
12036 
12037 module_param(bt_coexist, int, 0444);
12038 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12039 
12040 module_param(hwcrypto, int, 0444);
12041 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12042 
12043 module_param(cmdlog, int, 0444);
12044 MODULE_PARM_DESC(cmdlog,
12045 		 "allocate a ring buffer for logging firmware commands");
12046 
12047 module_param(roaming, int, 0444);
12048 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12049 
12050 module_param(antenna, int, 0444);
12051 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12052 
12053 module_exit(ipw_exit);
12054 module_init(ipw_init);
12055