xref: /illumos-gate/usr/src/uts/common/io/bnx/570x/driver/common/lmdev/bnx_lm_main.c (revision 55fea89dcaa64928bed4327112404dcb3e07b79f)
1 /*
2  * Copyright 2014-2017 Cavium, Inc.
3  * The contents of this file are subject to the terms of the Common Development
4  * and Distribution License, v.1, (the "License").
5  *
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the License available
9  * at http://opensource.org/licenses/CDDL-1.0
10  *
11  * See the License for the specific language governing permissions and
12  * limitations under the License.
13  */
14 
15 #include "lm5706.h"
16 #if !defined(LINUX) && !defined(SOLARIS)
17 #include "string.h"     // needed by some OS for memset
18 #pragma warning(disable:28718)
19 #endif
20 
21 
22 /*******************************************************************************
23  * Description: Abort the given RX or TX chain (idx selects the chain).
24  *
25  * Return: None.
26  ******************************************************************************/
27 void
28 lm_abort(
29     lm_device_t *pdev,
30     u32_t abort_op,
31     u32_t idx)
32 {
33     if(abort_op == ABORT_OP_RX_CHAIN)
34     {
35         lm_recv_abort(pdev, idx);
36     }
37     else if(abort_op == ABORT_OP_TX_CHAIN)
38     {
39         lm_send_abort(pdev, idx);
40     }
41     else
42     {
43         DbgBreakMsg("Invalid abort.\n");
44     }
45 } /* lm_abort */
46 
47 
48 
49 /*******************************************************************************
50  * Description: Recursively convert 'val' to decimal digits in 'str_buf',
51  *    most significant digit first.  No null terminator is written.
52  * Return: Pointer to the position just past the last digit written.
53  ******************************************************************************/
54 STATIC char *
55 val_to_decimal_string(
56     char *str_buf,
57     u32_t buf_size,
58     u32_t val)
59 {
60     u32_t digit;
61 
62     if(buf_size == 0)
63     {
64         return str_buf;
65     }
66 
67     digit = val % 10;
68     val = val / 10;
69 
70     if(val)
71     {
72         buf_size--;
73         str_buf = val_to_decimal_string(str_buf, buf_size, val);
74     }
75 
76     *str_buf = '0' + digit;
77 
78     str_buf++;
79 
80     return str_buf;
81 } /* val_to_decimal_string */
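
/*
 * Illustrative example: val_to_decimal_string(buf, 4, 507) recurses on
 * 507 -> 50 -> 5, emits '5', '0', '7' most-significant digit first, and
 * returns a pointer just past the '7'.  No null terminator is written;
 * callers such as build_ver_string() below add it themselves.
 */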
82 
83 
84 
85 /*******************************************************************************
86  * Description: Format the driver version as "v<major>.<minor>.<rel>.<fix>."
87  *    followed by a three-letter build-type suffix ("dbg" or "rtl").
88  * Return: Number of bytes written to 'str_buf', including the terminator.
89  ******************************************************************************/
90 STATIC u32_t
91 build_ver_string(
92     char *str_buf,
93     u32_t buf_size,
94     u8_t major_ver,
95     u8_t minor_ver,
96     u8_t rel_num,
97     u8_t fix_num)
98 {
99     char *p;
100 
101     if(buf_size == 0)
102     {
103         return 0;
104     }
105 
106     p = str_buf;
107 
108     if(buf_size - (p - str_buf) > 1)
109     {
110         *p = 'v';
111         p++;
112     }
113 
114     if(buf_size - (p - str_buf) > 1)
115     {
116         p = val_to_decimal_string(
117             p,
118             buf_size - (u32_t) PTR_SUB(p, str_buf),
119             major_ver);
120     }
121 
122     if(buf_size - (p - str_buf) > 1)
123     {
124         *p = '.';
125         p++;
126     }
127 
128     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
129     {
130         p = val_to_decimal_string(
131             p,
132             buf_size - (u32_t) PTR_SUB(p, str_buf),
133             minor_ver);
134     }
135 
136     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
137     {
138         *p = '.';
139         p++;
140     }
141 
142     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
143     {
144         p = val_to_decimal_string(
145             p,
146             buf_size - (u32_t) PTR_SUB(p, str_buf),
147             rel_num);
148     }
149 
150     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
151     {
152         *p = '.';
153         p++;
154     }
155 
156     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
157     {
158         p = val_to_decimal_string(
159             p,
160             buf_size - (u32_t) PTR_SUB(p, str_buf),
161             fix_num);
162     }
163 
164     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
165     {
166         *p = '.';
167         p++;
168     }
169 
170     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
171     {
172         #if DBG
173         *p = 'd';
174         #else
175         *p = 'r';
176         #endif
177 
178         p++;
179     }
180 
181     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
182     {
183         #if DBG
184         *p = 'b';
185         #else
186         *p = 't';
187         #endif
188 
189         p++;
190     }
191 
192     if(buf_size - (u32_t) PTR_SUB(p, str_buf) > 1)
193     {
194         #if DBG
195         *p = 'g';
196         #else
197         *p = 'l';
198         #endif
199 
200         p++;
201     }
202 
203     *p = 0;
204     p++;
205 
206     return (u32_t) PTR_SUB(p, str_buf);
207 } /* build_ver_string */
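
/*
 * Worked example (hypothetical version numbers): with major 7, minor 10,
 * release 4 and fix 8, a release build of the routine above produces
 * "v7.10.4.8.rtl" and a DBG build produces "v7.10.4.8.dbg", provided
 * buf_size is large enough; any field that no longer fits is skipped.
 */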
208 
209 
210 
211 /*******************************************************************************
212  * Description: Derive the TOE and iSCSI connection limits from license
213  *    and resource-reservation data in firmware shared memory.
214  * Return: None; results come back through the three out parameters.
215  ******************************************************************************/
216 STATIC void
217 get_max_conns(
218     lm_device_t *pdev,
219     u32_t *max_toe_conn,
220     u32_t *max_iscsi_conn,
221     u32_t *max_iscsi_pending_tasks)
222 {
223     u32_t max_lic_conn;
224     u32_t max_res_conn;
225     u32_t res_flags;
226 
227     /* get resource reservation flag. */
228     REG_RD_IND(
229         pdev,
230         pdev->hw_info.shmem_base +
231             OFFSETOF(shmem_region_t,
232                 dev_info.port_feature_config.resource.res_cfg),
233         &res_flags);
234 
235     /* get max_lic_conn for toe. */
236     REG_RD_IND(
237         pdev,
238         pdev->hw_info.shmem_base +
239             OFFSETOF(shmem_region_t, fw_lic_key.max_toe_conn),
240         &max_lic_conn);
241 
242     max_lic_conn &= 0xffff;
243 
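    /*
     * The count is XOR-obfuscated with FW_ENCODE_16BIT_PATTERN; a
     * decoded value of 0xffff appears to mean "unlimited" and is
     * capped at 1024 connections below.
     */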
244     if(max_lic_conn)
245     {
246         max_lic_conn ^= FW_ENCODE_16BIT_PATTERN;
247 
248         if(max_lic_conn == 0xffff)
249         {
250             max_lic_conn = 1024;
251         }
252     }
253 
254     /* get max_res_conn for toe. */
255     if(res_flags & RES_RES_CFG_VALID)
256     {
257         if(res_flags & RES_RES_CFG_L2)
258         {
259             REG_RD_IND(
260                 pdev,
261                 pdev->hw_info.shmem_base +
262                     OFFSETOF(shmem_region_t,
263                         dev_info.port_feature_config.resource.conn_resource1),
264                 &max_res_conn);
265             /*
266              * if(max_res_conn == 0 || !(res_flags & RES_RES_CFG_FCFS_DISABLED))
267              * CQ#42214 HH, SK and HYF all agreed on removing the test
268              * for max_res_conn == 0
269              */
270             if (!(res_flags & RES_RES_CFG_FCFS_DISABLED))
271             {
272                 max_res_conn = 1024;
273             }
274         }
275         else
276         {
277             max_res_conn = 0;
278         }
279     }
280     else
281     {
282         max_res_conn = 1024;
283     }
284 
285     *max_toe_conn = (max_lic_conn < max_res_conn) ? max_lic_conn: max_res_conn;
286 
287     /* get iscsi pending tasks. */
288     if((res_flags & RES_RES_CFG_VALID) && (res_flags & RES_RES_CFG_ISCSI))
289     {
290         REG_RD_IND(
291             pdev,
292             pdev->hw_info.shmem_base +
293                 OFFSETOF(shmem_region_t,
294                     dev_info.port_feature_config.resource.conn_resource3),
295             max_iscsi_pending_tasks);
296 
297         *max_iscsi_pending_tasks &= RES_CONN_ISCSI_PTASK_MASK;
298 
299         if(*max_iscsi_pending_tasks == 0 || *max_iscsi_pending_tasks > 128)
300         {
301             *max_iscsi_pending_tasks = 128;
302         }
303     }
304     else
305     {
306         *max_iscsi_pending_tasks = 128;
307         *max_iscsi_conn = 0;
308     }
309 
310     REG_RD_IND(
311         pdev,
312         pdev->hw_info.shmem_base +
313             OFFSETOF(shmem_region_t, fw_lic_key.max_iscsi_trgt_conn),
314         &max_lic_conn);
315 
316     if(max_lic_conn)
317     {
318         max_lic_conn ^= FW_ENCODE_32BIT_PATTERN;
319         max_lic_conn >>= 16;
320     }
321 
322     *max_iscsi_conn = max_lic_conn;
323 
324     /* no license information. */
325     if(*max_toe_conn == 0)
326     {
327         if(pdev->hw_info.svid == 0x103c)        /* HP device. */
328         {
329             *max_toe_conn = 1024;
330         }
331         else if(CHIP_REV(pdev) == CHIP_REV_IKOS ||
332                 CHIP_REV(pdev) == CHIP_REV_FPGA)
333         {
334             *max_toe_conn = 32;
335         }
336     }
337 
338     /* cq#39856 - iSCSI Device Disappears from System after reboot. */
339     if(*max_iscsi_conn == 0)
340     {
341         if(pdev->hw_info.svid == 0x103c)        /* HP device. */
342         {
343             *max_iscsi_conn = 1024;
344         }
345         else if(CHIP_REV(pdev) == CHIP_REV_IKOS ||
346                 CHIP_REV(pdev) == CHIP_REV_FPGA)
347         {
348             *max_iscsi_conn = 32;
349         }
350     }
351 } /* get_max_conns */
352 
353 
354 
355 /*******************************************************************************
356  * Description: Read PCI config and chip information, then initialize,
357  *    override, and validate the driver parameters.
358  * Return: LM_STATUS_SUCCESS or an LM_STATUS_* failure code.
359  ******************************************************************************/
360 lm_status_t
361 lm_get_dev_info(
362     lm_device_t *pdev)
363 {
364     typedef struct _param_entry_t
365     {
366         /* Ideally, we want to save the address of the parameter here.
367          * However, some compilers will not allow us to dynamically
368          * initialize the pointer to a parameter in the table below.
369          * As an alternative, we will save the offset to the parameter
370          * from the pdev device structure. */
371         u32_t offset;
372 
373         /* Parameter default value. */
374         u32_t asic_default;
375         u32_t fpga_ikos_default;
376 
377         /* Limit checking is disabled if min and max are zeros. */
378         u32_t min;
379         u32_t max;
380     } param_entry_t;
381 
382     #define _OFFSET(_name)          (OFFSETOF(lm_device_t, params._name))
383     #define PARAM_VAL(_pdev, _entry) \
384         (*((u32_t *) ((u8_t *) (_pdev) + (_entry)->offset)))
385     #define SET_PARAM_VAL(_pdev, _entry, _val) \
386         *((u32_t *) ((u8_t *) (_pdev) + (_entry)->offset)) = (_val)
387 
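    /*
     * Illustrative expansion (names from the table below): for the mtu
     * entry, PARAM_VAL(pdev, param) evaluates
     *
     *     *((u32_t *) ((u8_t *) pdev + OFFSETOF(lm_device_t, params.mtu)))
     *
     * i.e. pdev->params.mtu, which lets the loops further down walk
     * param_list and read or write each parameter generically.
     */
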
388     static param_entry_t param_list[] =
389     {
390         /*                                 asic     fpga/ikos
391            offset                          default  default  min     max */
392         { _OFFSET(mtu),                    1500,    1500,    1500,   9018 },
393         { _OFFSET(l2_rx_desc_cnt[0]),      200,     150,     0,      0 },
394         { _OFFSET(l2_rx_desc_cnt[1]),      0,       0,       0,      0 },
395         { _OFFSET(l2_rx_desc_cnt[2]),      0,       0,       0,      0 },
396         { _OFFSET(l2_rx_desc_cnt[3]),      0,       0,       0,      0 },
397         { _OFFSET(l2_rx_desc_cnt[4]),      0,       0,       0,      0 },
398         { _OFFSET(l2_rx_desc_cnt[5]),      0,       0,       0,      0 },
399         { _OFFSET(l2_rx_desc_cnt[6]),      0,       0,       0,      0 },
400         { _OFFSET(l2_rx_desc_cnt[7]),      0,       0,       0,      0 },
401         { _OFFSET(l2_rx_desc_cnt[8]),      0,       0,       0,      0 },
402         { _OFFSET(l2_rx_desc_cnt[9]),      0,       0,       0,      0 },
403         { _OFFSET(l2_rx_desc_cnt[10]),     0,       0,       0,      0 },
404         { _OFFSET(l2_rx_desc_cnt[11]),     0,       0,       0,      0 },
405         #if 0
406         { _OFFSET(l2_rx_desc_cnt[12]),     0,       0,       0,      0 },
407         { _OFFSET(l2_rx_desc_cnt[13]),     0,       0,       0,      0 },
408         { _OFFSET(l2_rx_desc_cnt[14]),     0,       0,       0,      0 },
409         { _OFFSET(l2_rx_desc_cnt[15]),     0,       0,       0,      0 },
410         #endif
411 
412         /* The maximum page count is chosen to prevent us from having
413          * more than 32767 pending entries at any one time. */
414         { _OFFSET(l2_tx_bd_page_cnt[0]),   2,       2,       1,      127 },
415         { _OFFSET(l2_tx_bd_page_cnt[1]),   1,       1,       1,      127 },
416         { _OFFSET(l2_tx_bd_page_cnt[2]),   1,       1,       1,      127 },
417         { _OFFSET(l2_tx_bd_page_cnt[3]),   1,       1,       1,      127 },
418         { _OFFSET(l2_tx_bd_page_cnt[4]),   1,       1,       1,      127 },
419         { _OFFSET(l2_tx_bd_page_cnt[5]),   1,       1,       1,      127 },
420         { _OFFSET(l2_tx_bd_page_cnt[6]),   1,       1,       1,      127 },
421         { _OFFSET(l2_tx_bd_page_cnt[7]),   1,       1,       1,      127 },
422         { _OFFSET(l2_tx_bd_page_cnt[8]),   1,       1,       1,      127 },
423         { _OFFSET(l2_tx_bd_page_cnt[9]),   1,       1,       1,      127 },
424         { _OFFSET(l2_tx_bd_page_cnt[10]),  1,       1,       1,      127 },
425         { _OFFSET(l2_tx_bd_page_cnt[11]),  1,       1,       1,      127 },
426 
427         { _OFFSET(l2_rx_bd_page_cnt[0]),   2,       2,       1,      127 },
428         { _OFFSET(l2_rx_bd_page_cnt[1]),   1,       1,       1,      127 },
429         { _OFFSET(l2_rx_bd_page_cnt[2]),   1,       1,       1,      127 },
430         { _OFFSET(l2_rx_bd_page_cnt[3]),   1,       1,       1,      127 },
431         { _OFFSET(l2_rx_bd_page_cnt[4]),   1,       1,       1,      127 },
432         { _OFFSET(l2_rx_bd_page_cnt[5]),   1,       1,       1,      127 },
433         { _OFFSET(l2_rx_bd_page_cnt[6]),   1,       1,       1,      127 },
434         { _OFFSET(l2_rx_bd_page_cnt[7]),   1,       1,       1,      127 },
435         { _OFFSET(l2_rx_bd_page_cnt[8]),   1,       1,       1,      127 },
436         { _OFFSET(l2_rx_bd_page_cnt[9]),   1,       1,       1,      127 },
437         { _OFFSET(l2_rx_bd_page_cnt[10]),  1,       1,       1,      127 },
438         { _OFFSET(l2_rx_bd_page_cnt[11]),  1,       1,       1,      127 },
439         #if 0
440         { _OFFSET(l2_rx_bd_page_cnt[12]),  1,       1,       1,      127 },
441         { _OFFSET(l2_rx_bd_page_cnt[13]),  1,       1,       1,      127 },
442         { _OFFSET(l2_rx_bd_page_cnt[14]),  1,       1,       1,      127 },
443         { _OFFSET(l2_rx_bd_page_cnt[15]),  1,       1,       1,      127 },
444         #endif
445 
446         { _OFFSET(l4_tx_bd_page_cnt),      1,       1,       1,      255 },
447         { _OFFSET(limit_l4_tx_bd_cnt),     0,       0,       0,      0 },
448         { _OFFSET(l4_rx_bd_page_cnt),      1,       1,       1,      255 },
449         { _OFFSET(limit_l4_rx_bd_cnt),     0,       0,       0,      0 },
450 
451         #ifndef EXCLUDE_KQE_SUPPORT
452         #if INCLUDE_OFLD_SUPPORT
453         { _OFFSET(kwq_page_cnt),           4,       2,       1,      255 },
454         { _OFFSET(kcq_page_cnt),           32,      32,      1,      255 },
455         { _OFFSET(kcq_history_size),       0x80,    0x80,    0,      0   },
456         #else
457         /* Kernel queues are used when RSS or TCP offload is enabled.
458          * When RSS is enabled, the upper module should modify the
459          * default settings for these parameters. */
460         { _OFFSET(kwq_page_cnt),           0,       0,       0,      0 },
461         { _OFFSET(kcq_page_cnt),           0,       0,       0,      0 },
462         { _OFFSET(kcq_history_size),       0,       0,       0,      0 },
463         #endif
464 
465         /* Connection kcqe/kwqe history. */
466         { _OFFSET(con_kcqe_history_size),  0,       0,       0,      0 },
467         { _OFFSET(con_kwqe_history_size),  0,       0,       0,      0 },
468         #endif
469 
470         { _OFFSET(gen_bd_page_cnt),        2,       2,       1,      127 },
471         { _OFFSET(max_gen_buf_cnt),        0x8000,  0x8000,  0,      0 },
472         { _OFFSET(gen_buf_per_alloc),      0x4,     0x4,     0,      0 },
473 
474         { _OFFSET(copy_buffered_data),     0,       0,       0,      0 },
475         { _OFFSET(rcv_buffer_offset),      0x38,    0x38,    0,      0 },
476         { _OFFSET(enable_syn_rcvq),        0,       0,       0,      0 },
477 
478         { _OFFSET(hcopy_desc_cnt),         0,       0,       0,      0 },
479         { _OFFSET(hcopy_bd_page_cnt),      2,       2,       1,      127 },
480         { _OFFSET(buffered_kcqe_cnt),      0x80,    0x80,    0,      0 },
481 
482         { _OFFSET(deferred_kcqe_cnt),      0x100,   0x100,   0,      0 },
483 
484         { _OFFSET(test_mode),              0x60,    0x60,    0,      0 },
485         { _OFFSET(ofld_cap),               0,       0,       0,      0 },
486         { _OFFSET(wol_cap),                0,       0,       0,      0 },
487         { _OFFSET(flow_ctrl_cap),          0,       0,       0,      0 },
488         { _OFFSET(req_medium),             0,       0,       0,      0xfffff },
489         { _OFFSET(selective_autoneg),      0,       0,       0,      0 },
490         { _OFFSET(wire_speed),             1,       0,       0,      0 },
491         { _OFFSET(phy_addr),               1,       0,       0,      0 },
492         { _OFFSET(phy_int_mode),           2,       2,       0,      0 },
493         { _OFFSET(link_chng_mode),         2,       2,       0,      0 },
494 
495         { _OFFSET(hc_timer_mode),          0,       0,       0,      0 },
496         { _OFFSET(ind_comp_limit),         200,     100,     0,      0 },
497         { _OFFSET(tx_quick_cons_trip_int), 3,       10,      0,      0 },
498         { _OFFSET(tx_quick_cons_trip),     3,       30,      0,      0 },
499         { _OFFSET(tx_ticks_int),           30,      10,      0,      0 },
500         { _OFFSET(tx_ticks),               60,      200,     0,      0 },
501         { _OFFSET(rx_quick_cons_trip_int), 1,       3,       0,      0 },
502         { _OFFSET(rx_quick_cons_trip),     2,       1,       0,      0 },
503         { _OFFSET(rx_ticks_int),           15,      5,       0,      0 },
504         { _OFFSET(rx_ticks),               45,      1,       0,      0 },
505         { _OFFSET(comp_prod_trip_int),     2,       3,       0,      0 },
506         { _OFFSET(comp_prod_trip),         4,       1,       0,      0 },
507         { _OFFSET(com_ticks_int),          64,      5,       0,      0 },
508         { _OFFSET(com_ticks),              220,     1,       0,      0 },
509         { _OFFSET(cmd_ticks_int),          64,      5,       0,      0 },
510         { _OFFSET(cmd_ticks),              220,     1,       0,      0 },
511         { _OFFSET(stats_ticks),            1000000, 1000000, 0,      0 },
512 
513         /* Xinan per-processor HC configuration. */
514         { _OFFSET(psb_tx_cons_trip),       0x100010,0x100010,0,      0 },
515         { _OFFSET(psb_tx_ticks),           0x100040,0x100040,0,      0 },
516         { _OFFSET(psb_rx_cons_trip),       0x100010,0x100010,0,      0 },
517         { _OFFSET(psb_rx_ticks),           0x80020, 0x80020, 0,      0 },
518         { _OFFSET(psb_comp_prod_trip),     0x80008, 0x80008, 0,      0 },
519         { _OFFSET(psb_com_ticks),          0x400040,0x400040,0,      0 },
520         { _OFFSET(psb_cmd_ticks),          0x400040,0x400040,0,      0 },
521         { _OFFSET(psb_period_ticks),       0,       0,       0,      0 },
522 
523         { _OFFSET(enable_fir),             1,       1,       0,      0 },
524         { _OFFSET(num_rchans),             5,       5,       0,      0 },
525         { _OFFSET(num_wchans),             3,       3,       0,      0 },
526 
527         /* On some systems, with one_tdma disabled, we will get data
528          * corruption.  Currently this looks like a chipset bug.  The
529          * chip group will continue to look into this.  So for now, we
530          * will enable one_tdma for all chip revisions. */
531         { _OFFSET(one_tdma),               0,       0,       0,      0 },
532 
533         { _OFFSET(ping_pong_dma),          0,       0,       0,      0 },
534         { _OFFSET(tmr_reload_value1),      0x6c627970, 0,    0,      0 },
535         { _OFFSET(keep_vlan_tag),          0,       0,       0,      0 },
536 
537         { _OFFSET(enable_remote_phy),      0,       0,       0,      0 },
538         { _OFFSET(rphy_req_medium),        0,       0,       0,      0 },
539         { _OFFSET(rphy_flow_ctrl_cap),     0,       0,       0,      0 },
540         { _OFFSET(rphy_selective_autoneg), 0,       0,       0,      0 },
541         { _OFFSET(rphy_wire_speed),        1,       0,       0,      0 },
542 
543         { _OFFSET(bin_mq_mode),            0,       0,       0,      0 },
544         { _OFFSET(validate_l4_data),       0,       0,       0,      0 },
545         { _OFFSET(disable_pcie_nfr),       0,       0,       0,      0 },
546         { _OFFSET(fw_flow_control),        0,       0,       0,      0 },
547         { _OFFSET(fw_flow_control_wait),   0xffff,  0xffff,  0,      0xffff },
548         { _OFFSET(ena_large_grc_timeout),  0,       0,       0,      0 },
549         { _OFFSET(flow_control_reporting_mode),     0,       0,      0,      0 },
550         { 0,                               0,       0,       0,      0 }
551     };
552 
553     lm_status_t lm_status;
554     param_entry_t *param;
555     u32_t val;
556 
557     DbgMessage(pdev, INFORMi, "### lm_get_dev_info\n");
558 
559     /* Get PCI device and vendor id. */
560     lm_status = mm_read_pci(
561         pdev,
562         OFFSETOF(reg_space_t, pci_config.pcicfg_vendor_id),
563         &val);
564     if(lm_status != LM_STATUS_SUCCESS)
565     {
566         return lm_status;
567     }
568 
569     pdev->hw_info.vid = (u16_t) val;
570     DbgMessage1(pdev, INFORMi, "vid 0x%x\n", pdev->hw_info.vid);
571 
572     pdev->hw_info.did = (u16_t) (val >> 16);
573     DbgMessage1(pdev, INFORMi, "did 0x%x\n", pdev->hw_info.did);
574 
575     /* Get subsystem and subvendor id. */
576     lm_status = mm_read_pci(
577         pdev,
578         OFFSETOF(reg_space_t, pci_config.pcicfg_subsystem_vendor_id),
579         &val);
580     if(lm_status != LM_STATUS_SUCCESS)
581     {
582         return lm_status;
583     }
584 
585     pdev->hw_info.svid = (u16_t) val;
586     DbgMessage1(pdev, INFORMi, "svid 0x%x\n", pdev->hw_info.svid);
587 
588     pdev->hw_info.ssid = (u16_t) (val >> 16);
589     DbgMessage1(pdev, INFORMi, "ssid 0x%x\n", pdev->hw_info.ssid);
590 
591     /* Get IRQ and interrupt pin. */
592     lm_status = mm_read_pci(
593         pdev,
594         OFFSETOF(reg_space_t, pci_config.pcicfg_int_line),
595         &val);
596     if(lm_status != LM_STATUS_SUCCESS)
597     {
598         return lm_status;
599     }
600 
601     pdev->hw_info.irq = (u8_t) val;
602     DbgMessage1(pdev, INFORMi, "IRQ 0x%x\n", pdev->hw_info.irq);
603 
604     pdev->hw_info.int_pin = (u8_t) (val >> 8);
605     DbgMessage1(pdev, INFORMi, "Int pin 0x%x\n", pdev->hw_info.int_pin);
606 
607     /* Get cache line size. */
608     lm_status = mm_read_pci(
609         pdev,
610         OFFSETOF(reg_space_t, pci_config.pcicfg_cache_line_size),
611         &val);
612     if(lm_status != LM_STATUS_SUCCESS)
613     {
614         return lm_status;
615     }
616 
617     pdev->hw_info.cache_line_size = (u8_t) val;
618     DbgMessage1(pdev, INFORMi, "Cache line size 0x%x\n", (u8_t) val);
619 
620     pdev->hw_info.latency_timer = (u8_t) (val >> 8);
621     DbgMessage1(pdev, INFORMi, "Latency timer 0x%x\n", (u8_t) (val >> 8));
622 
623     /* Get PCI revision id. */
624     lm_status = mm_read_pci(
625         pdev,
626         OFFSETOF(reg_space_t, pci_config.pcicfg_class_code),
627         &val);
628     if(lm_status != LM_STATUS_SUCCESS)
629     {
630         return lm_status;
631     }
632 
633     pdev->hw_info.rev_id = (u8_t) val;
634     DbgMessage1(pdev, INFORMi, "Revision id 0x%x\n", pdev->hw_info.rev_id);
635 
636     /* Get the base address. */
637     lm_status = mm_read_pci(
638         pdev,
639         OFFSETOF(reg_space_t, pci_config.pcicfg_bar_1),
640         &val);
641     if(lm_status != LM_STATUS_SUCCESS)
642     {
643         return lm_status;
644     }
645 
646 #ifndef CONFIG_PPC64
647     pdev->hw_info.mem_base.as_u32.low = val & 0xfffffff0;
648 #endif
649 
650     DbgMessage1(pdev, INFORMi, "Mem base low 0x%x\n", pdev->hw_info.mem_base.as_u32.low);
651 
652     val = 0;
653 
654     lm_status = mm_read_pci(
655         pdev,
656         OFFSETOF(reg_space_t, pci_config.pcicfg_bar_2),
657         &val);
658     if(lm_status != LM_STATUS_SUCCESS)
659     {
660         return lm_status;
661     }
662 
663 #ifndef CONFIG_PPC64
664     pdev->hw_info.mem_base.as_u32.high = val;
665 #endif
666 
667     DbgMessage1(pdev, INFORMi, "Mem base high 0x%x\n",
668         pdev->hw_info.mem_base.as_u32.high);
669 
670     /* Enable PCI bus master.  This is supposed to be enabled by the
671      * BIOS; however, the BIOS on older systems may not set this bit. */
672     lm_status = mm_read_pci(
673         pdev,
674         OFFSETOF(reg_space_t, pci_config.pcicfg_command),
675         &val);
676     if(lm_status != LM_STATUS_SUCCESS)
677     {
678         return lm_status;
679     }
680 
681     /* Error out if memory map is NOT enabled.  This could occur if the
682      * BIOS is not able to reserve an address range for the device. */
683     if(!(val & PCICFG_COMMAND_MEM_SPACE))
684     {
685         DbgBreakMsg("MEM_SPACE not enabled.\n");
686 
687         return LM_STATUS_FAILURE;
688     }
689 
690     val |= PCICFG_COMMAND_BUS_MASTER;
691 
692     lm_status = mm_write_pci(
693         pdev,
694         OFFSETOF(reg_space_t, pci_config.pcicfg_command),
695         val);
696     if(lm_status != LM_STATUS_SUCCESS)
697     {
698         return lm_status;
699     }
700 
701     /* Configure byte swap and enable write to the reg_window registers. */
702     val = PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
703         PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
704     lm_status = mm_write_pci(
705         pdev,
706         OFFSETOF(reg_space_t, pci_config.pcicfg_misc_config),
707         val);
708     if(lm_status != LM_STATUS_SUCCESS)
709     {
710         return lm_status;
711     }
712 
713     /* Get the bar size at register 0x408 via PCI configuration indirect. */
714     lm_status = mm_write_pci(
715         pdev,
716         OFFSETOF(pci_config_t, pcicfg_reg_window_address),
717         OFFSETOF(reg_space_t, pci.pci_config_2));
718     if(lm_status != LM_STATUS_SUCCESS)
719     {
720         return lm_status;
721     }
722 
723     lm_status = mm_read_pci(
724         pdev,
725         OFFSETOF(pci_config_t, pcicfg_reg_window),
726         &val);
727     if(lm_status != LM_STATUS_SUCCESS)
728     {
729         return lm_status;
730     }
731 
732     val &= PCI_CONFIG_2_BAR1_SIZE;
733     if(val == PCI_CONFIG_2_BAR1_SIZE_DISABLED ||
734         val > PCI_CONFIG_2_BAR1_SIZE_1G)
735     {
736         DbgBreakMsg("Invalid bar size.\n");
737 
738         return LM_STATUS_FAILURE;
739     }
740 
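    /* The size field encodes a biased power of two: bar_size is
     * 1 << (val + 15), e.g. val == 1 yields a 64KB BAR and val == 6
     * yields a 2MB BAR. */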
741     pdev->hw_info.bar_size = 1 << (val+15);
742     DbgMessage1(pdev, INFORM, "bar_size 0x%x\n", pdev->hw_info.bar_size);
743 
744     /* Map memory base to system address space. */
745     pdev->vars.regview = (reg_space_t *) mm_map_io_base(
746         pdev,
747         pdev->hw_info.mem_base,
748         pdev->hw_info.bar_size);
749     if(pdev->vars.regview == NULL)
750     {
751         return LM_STATUS_FAILURE;
752     }
753     DbgMessage1(pdev, INFORMi, "Mapped base %p\n", pdev->vars.regview);
754 
755     #if DBG
756     /* Make sure byte swapping is properly configured. */
757     REG_RD(pdev, pci.pci_swap_diag0, &val);
758 
759     DbgBreakIf(val != 0x1020304);
760     #endif
761 
762     /* Get the chip revision id and number. */
763     REG_RD(pdev, misc.misc_id, &pdev->hw_info.chip_id);
764     DbgMessage1(pdev, INFORMi, "chip id 0x%x\n", pdev->hw_info.chip_id);
765 
766     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
767     {
768         pdev->hw_info.bus_mode = BUS_MODE_PCIE;
769     }
770     else
771     {
772         /* Get bus information. */
773         REG_RD(pdev, pci_config.pcicfg_misc_status, &val);
774 
775         if(val & PCICFG_MISC_STATUS_32BIT_DET)
776         {
777             pdev->hw_info.bus_width = BUS_WIDTH_32_BIT;
778             DbgMessage(pdev, INFORM, "32bit bus width.\n");
779         }
780         else
781         {
782             pdev->hw_info.bus_width = BUS_WIDTH_64_BIT;
783             DbgMessage(pdev, INFORM, "64bit bus width.\n");
784         }
785 
786         if(val & PCICFG_MISC_STATUS_PCIX_DET)
787         {
788             pdev->hw_info.bus_mode = BUS_MODE_PCIX;
789             DbgMessage(pdev, INFORM, "PCIX bus detected.\n");
790 
791             REG_RD(pdev, pci_config.pcicfg_pci_clock_control_bits, &val);
792             switch(val & PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET)
793             {
794             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
795                 pdev->hw_info.bus_speed = BUS_SPEED_133_MHZ;
796                 DbgMessage(pdev, INFORM, "Bus speed is 133Mhz.\n");
797                 break;
798 
799             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
800                 pdev->hw_info.bus_speed = BUS_SPEED_100_MHZ;
801                 DbgMessage(pdev, INFORM, "Bus speed is 100Mhz.\n");
802                 break;
803 
804             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
805             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
806                 pdev->hw_info.bus_speed = BUS_SPEED_66_MHZ;
807                 DbgMessage(pdev, INFORM, "Bus speed is 66Mhz.\n");
808                 break;
809 
810             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
811             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
812                 pdev->hw_info.bus_speed = BUS_SPEED_50_MHZ;
813                 DbgMessage(pdev, INFORM, "Bus speed is 50Mhz.\n");
814                 break;
815 
816             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET:
817             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
818             case PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
819             default:
820                 pdev->hw_info.bus_speed = BUS_SPEED_33_MHZ;
821                 DbgMessage(pdev, INFORM, "Bus speed is 33Mhz.\n");
822                 break;
823             }
824         }
825         else
826         {
827             pdev->hw_info.bus_mode = BUS_MODE_PCI;
828             DbgMessage(pdev, INFORM, "Conventional PCI bus detected.\n");
829 
830             if(val & PCICFG_MISC_STATUS_M66EN)
831             {
832                 pdev->hw_info.bus_speed = BUS_SPEED_66_MHZ;
833                 DbgMessage(pdev, INFORM, "Bus speed is 66Mhz.\n");
834             }
835             else
836             {
837                 pdev->hw_info.bus_speed = BUS_SPEED_33_MHZ;
838                 DbgMessage(pdev, INFORM, "Bus speed is 33Mhz.\n");
839             }
840         }
841     }
842 
843     if(CHIP_ID(pdev) == CHIP_ID_5706_A0 || CHIP_ID(pdev) == CHIP_ID_5706_A1)
844     {
845         REG_RD_OFFSET(
846             pdev,
847             OFFSETOF(reg_space_t, pci_config.pcicfg_command),
848             &val);
849 
850         /* 5706A0 may falsely detect SERR and PERR. */
851         if(CHIP_ID(pdev) == CHIP_ID_5706_A0)
852         {
853             val &= ~(PCICFG_COMMAND_SERR_ENA | PCICFG_COMMAND_PERR_ENA);
854         }
855 
856         /* 5706A1 PCI 64-bit. */
857         else if(pdev->hw_info.bus_mode == BUS_MODE_PCI &&
858             pdev->hw_info.bus_width == BUS_WIDTH_64_BIT)
859         {
860             /* E4_5706A1_577: PERR IS INCORRECTLY GENERATED IN PCI 64-BIT.
861                Description: If the data on the upper AD and CBE busses
862                   do not match the parity of PAR64 during a 32-bit target
863                   access, a parity error is incorrectly generated. This
864                   happens only after a 64-bit master DMA operation has been
865                   done by the chip.
866                Scope: All PCI 64-bit systems.
867                Impact: Ability to indicate a real parity error is lost.
868                Workaround: Driver needs to clear PERR_EN. */
869             val &= ~PCICFG_COMMAND_PERR_ENA;
870         }
871 
872         REG_WR_OFFSET(
873             pdev,
874             OFFSETOF(reg_space_t, pci_config.pcicfg_command),
875             val);
876     }
877     else if(CHIP_ID(pdev) == CHIP_ID_5708_A0)
878     {
879         /* 5708A0 errata. */
880         REG_RD_OFFSET(
881             pdev,
882             OFFSETOF(reg_space_t, pci_config.pcicfg_command),
883             &val);
884 
885         val &= ~(PCICFG_COMMAND_SERR_ENA | PCICFG_COMMAND_PERR_ENA);
886 
887         REG_WR_OFFSET(
888             pdev,
889             OFFSETOF(reg_space_t, pci_config.pcicfg_command),
890             val);
891     }
892 
893     /* Get the EPB info. */
894     if(CHIP_NUM(pdev) == CHIP_NUM_5708)
895     {
896         REG_RD_IND(pdev, 0x240000+0x18, &val);
897         pdev->hw_info.pcie_bus_num = (u8_t) val;
898 
899         REG_RD_IND(pdev, 0x240000+0x6c, &val);
900         pdev->hw_info.pcie_max_width = (u8_t) ((val & 0x3f0) >> 4);
901 
902         switch(val & 0xf)
903         {
904             case 1:
905                 pdev->hw_info.pcie_max_speed = PCIE_SPEED_2_5_G;
906                 break;
907 
908             default:
909                 pdev->hw_info.pcie_max_speed = 0;
910                 break;
911         }
912 
913         REG_RD_IND(pdev, 0x240000+0x70, &val);
914         pdev->hw_info.pcie_width = (u8_t) ((val & 0x3f00000) >> 20);
915 
916         switch(val & 0xf0000)
917         {
918             case 0x10000:
919                 pdev->hw_info.pcie_speed = PCIE_SPEED_2_5_G;
920                 break;
921 
922             default:
923                 pdev->hw_info.pcie_speed = 0;
924                 break;
925         }
926     }
927     else if(CHIP_NUM(pdev) == CHIP_NUM_5709)
928     {
929         REG_RD(pdev, pci_config.pcicfg_link_capability, &val);
930         pdev->hw_info.pcie_max_width =
931             (u8_t) ((val & PCICFG_LINK_CAPABILITY_MAX_LINK_WIDTH) >> 4);
932         switch (val & PCICFG_LINK_CAPABILITY_MAX_LINK_SPEED)
933         {
934             case PCICFG_LINK_CAPABILITY_MAX_LINK_SPEED_5:
935                 pdev->hw_info.pcie_max_speed = PCIE_SPEED_5_G;
936                 break;
937             case PCICFG_LINK_CAPABILITY_MAX_LINK_SPEED_2_5:
938                 pdev->hw_info.pcie_max_speed = PCIE_SPEED_2_5_G;
939                 break;
940             default:
941                 pdev->hw_info.pcie_max_speed = 0;
942                 break;
943         }
944 
945         REG_RD(pdev, pci_config.pcicfg_link_status, &val);
946         pdev->hw_info.pcie_width =
947             (u8_t) ((val & PCICFG_LINK_STATUS_NEG_LINK_WIDTH) >> 4);
948         switch (val & PCICFG_LINK_STATUS_SPEED)
949         {
950             case PCICFG_LINK_CAPABILITY_MAX_LINK_SPEED_5:
951                 pdev->hw_info.pcie_speed = PCIE_SPEED_5_G;
952                 break;
953             case PCICFG_LINK_CAPABILITY_MAX_LINK_SPEED_2_5:
954                 pdev->hw_info.pcie_speed = PCIE_SPEED_2_5_G;
955                 break;
956             default:
957                 pdev->hw_info.pcie_speed = 0;
958                 break;
959         }
960 
961         REG_RD_IND(pdev, OFFSETOF(reg_space_t, mcp.mcp_toe_id), &val);
962         if(val & MCP_TOE_ID_FUNCTION_ID)
963         {
964             pdev->hw_info.mac_id = 1;
965         }
966     }
967 
968     /* Get the firmware shared memory base address. */
969     REG_RD_IND(
970         pdev,
971         MCP_SCRATCHPAD_START + OFFSETOF(shm_hdr_t, shm_hdr_signature),
972         &val);
973     if((val & SHM_ADDR_SIGN_MASK) == SHM_ADDR_SIGNATURE)
974     {
975         REG_RD_IND(
976             pdev,
977             MCP_SCRATCHPAD_START +
978                 OFFSETOF(shm_hdr_t, shm_addr[pdev->hw_info.mac_id]),
979             &pdev->hw_info.shmem_base);
980     }
981     else
982     {
983         /* Pre v1.3.2 bootcode. */
984         pdev->hw_info.shmem_base = HOST_VIEW_SHMEM_BASE;
985     }
986 
987     /* Get the hw config word. */
988     REG_RD_IND(
989         pdev,
990         pdev->hw_info.shmem_base +
991             OFFSETOF(shmem_region_t, dev_info.shared_hw_config.config),
992         &val);
993     pdev->hw_info.nvm_hw_config = val;
994 
995     get_max_conns(
996         pdev,
997         &pdev->hw_info.max_toe_conn,
998         &pdev->hw_info.max_iscsi_conn,
999         &pdev->hw_info.max_iscsi_pending_tasks);
1000 
1001     /* Get the permanent MAC address. */
1002     REG_RD_IND(
1003         pdev,
1004         pdev->hw_info.shmem_base +
1005             OFFSETOF(shmem_region_t, dev_info.port_hw_config.mac_upper),
1006         &val);
1007     pdev->hw_info.mac_addr[0] = (u8_t) (val >> 8);
1008     pdev->hw_info.mac_addr[1] = (u8_t) val;
1009 
1010     REG_RD_IND(
1011         pdev,
1012         pdev->hw_info.shmem_base +
1013             OFFSETOF(shmem_region_t, dev_info.port_hw_config.mac_lower),
1014         &val);
1015 
1016     pdev->hw_info.mac_addr[2] = (u8_t) (val >> 24);
1017     pdev->hw_info.mac_addr[3] = (u8_t) (val >> 16);
1018     pdev->hw_info.mac_addr[4] = (u8_t) (val >> 8);
1019     pdev->hw_info.mac_addr[5] = (u8_t) val;
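
    /*
     * Example with a hypothetical address 00:10:18:a1:b2:c3: mac_upper
     * reads as 0x00000010 and mac_lower as 0x18a1b2c3, unpacked above
     * into mac_addr[0..5] in network byte order.
     */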
1020 
1021     /* Get iSCSI MAC address. */
1022     REG_RD_IND(
1023         pdev,
1024         pdev->hw_info.shmem_base +
1025             OFFSETOF(
1026                 shmem_region_t,
1027                 dev_info.port_hw_config.iscsi_mac_upper),
1028         &val);
1029     pdev->hw_info.iscsi_mac_addr[0] = (u8_t) (val >> 8);
1030     pdev->hw_info.iscsi_mac_addr[1] = (u8_t) val;
1031 
1032     REG_RD_IND(
1033         pdev,
1034         pdev->hw_info.shmem_base +
1035             OFFSETOF(
1036                 shmem_region_t,
1037                 dev_info.port_hw_config.iscsi_mac_lower),
1038         &val);
1039     pdev->hw_info.iscsi_mac_addr[2] = (u8_t) (val >> 24);
1040     pdev->hw_info.iscsi_mac_addr[3] = (u8_t) (val >> 16);
1041     pdev->hw_info.iscsi_mac_addr[4] = (u8_t) (val >> 8);
1042     pdev->hw_info.iscsi_mac_addr[5] = (u8_t) val;
1043 
1044     DbgMessage6(pdev, INFORM, "mac addr: %02x %02x %02x %02x %02x %02x\n",
1045         pdev->hw_info.mac_addr[0],
1046         pdev->hw_info.mac_addr[1],
1047         pdev->hw_info.mac_addr[2],
1048         pdev->hw_info.mac_addr[3],
1049         pdev->hw_info.mac_addr[4],
1050         pdev->hw_info.mac_addr[5]);
1051 
1052     DbgBreakIf(LM_DRIVER_MAJOR_VER > 255);
1053     DbgBreakIf(LM_DRIVER_MINOR_VER > 255);
1054     DbgBreakIf(LM_DRIVER_REL_NUM > 255);
1055     DbgBreakIf(LM_DRIVER_FIX_NUM > 255);
1056 
1057     pdev->ver_num =
1058         (LM_DRIVER_MAJOR_VER << 24) |
1059         (LM_DRIVER_MINOR_VER << 16) |
1060         (LM_DRIVER_REL_NUM << 8)    |
1061         LM_DRIVER_FIX_NUM;
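
    /*
     * ver_num packs one byte per component; a hypothetical v2.8.13.5
     * would become 0x02080d05.
     */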
1062 
1063     (void) build_ver_string(
1064         (char *)pdev->ver_str,
1065         sizeof(pdev->ver_str),
1066         LM_DRIVER_MAJOR_VER,
1067         LM_DRIVER_MINOR_VER,
1068         LM_DRIVER_REL_NUM,
1069         LM_DRIVER_FIX_NUM);
1070 
1071     pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0];
1072     pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1];
1073     pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2];
1074     pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3];
1075     pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4];
1076     pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5];
1077 
1078     /* Initialize the default parameters. */
1079     param = param_list;
1080     while(param->offset)
1081     {
1082         if(CHIP_REV(pdev) == CHIP_REV_FPGA || CHIP_REV(pdev) == CHIP_REV_IKOS)
1083         {
1084             SET_PARAM_VAL(pdev, param, param->fpga_ikos_default);
1085         }
1086         else
1087         {
1088             SET_PARAM_VAL(pdev, param, param->asic_default);
1089         }
1090 
1091         param++;
1092     }
1093 
1094     if(CHIP_REV(pdev) == CHIP_REV_FPGA || CHIP_REV(pdev) == CHIP_REV_IKOS)
1095     {
1096         pdev->params.test_mode |= TEST_MODE_INIT_GEN_BUF_DATA;
1097         pdev->params.test_mode |= TEST_MODE_SAVE_DUMMY_DMA_DATA;
1098         pdev->params.test_mode |= TEST_MODE_IGNORE_SHMEM_SIGNATURE;
1099         pdev->params.test_mode |= TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE;
1100     }
1101 
1102     /* Some chipsets are not capable of handling multiple
1103      * read requests.  Currently we will get data corruption on
1104      * the Intel 840/860 chipset when one_tdma is not enabled. */
1105     if(pdev->hw_info.bus_mode == BUS_MODE_PCI)
1106     {
1107         if((CHIP_NUM(pdev)==CHIP_NUM_5706 || CHIP_NUM(pdev)==CHIP_NUM_5708) &&
1108             (CHIP_REV(pdev)==CHIP_REV_FPGA || CHIP_REV(pdev)==CHIP_REV_IKOS))
1109         {
1110             pdev->params.ping_pong_dma = FALSE;
1111         }
1112         else
1113         {
1114             pdev->params.ping_pong_dma = TRUE;
1115         }
1116     }
1117     else
1118     {
1119         pdev->params.ping_pong_dma = FALSE;
1120     }
1121 
1122     /* Get the pre-emphasis. */
1123     REG_RD_IND(
1124         pdev,
1125         pdev->hw_info.shmem_base +
1126             OFFSETOF(shmem_region_t, dev_info.port_hw_config.config),
1127         &pdev->params.serdes_pre_emphasis);
1128     pdev->params.serdes_pre_emphasis &= PORT_HW_CFG_SERDES_TXCTL3_MASK;
1129 
1130     /* This should be fixed in A1. */
1131     if(CHIP_ID(pdev) == CHIP_ID_5706_A0)
1132     {
1133         if(pdev->hw_info.bus_mode == BUS_MODE_PCIX &&
1134             pdev->hw_info.bus_speed == BUS_SPEED_133_MHZ)
1135         {
1136             pdev->params.num_rchans = 1;
1137         }
1138     }
1139 
1140     #if defined(DBG) && !defined(EXCLUDE_KQE_SUPPORT)
1141     pdev->params.con_kcqe_history_size = 256;
1142     pdev->params.con_kwqe_history_size = 256;
1143     #endif
1144 
1145     if(CHIP_NUM(pdev) == CHIP_NUM_5708 || CHIP_NUM(pdev) == CHIP_NUM_5709)
1146     {
1147         if(lm_get_medium(pdev) == LM_MEDIUM_TYPE_FIBER)
1148         {
1149             pdev->params.phy_addr = 2;
1150         }
1151     }
1152 
1153     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
1154     {
1155         pdev->params.bin_mq_mode = TRUE;
1156     }
1157 
1158     DbgBreakIf(NUM_RX_CHAIN != NUM_TX_CHAIN);
1159 
1160     pdev->rx_info.num_rxq = NUM_RX_CHAIN;
1161     pdev->tx_info.num_txq = NUM_TX_CHAIN;
1162     pdev->tx_info.cu_idx = TX_CHAIN_IDX1;
1163 
1164     /* see if remote phy is enabled. */
1165     if(CHIP_REV(pdev) != CHIP_REV_IKOS)
1166     {
1167         REG_RD_IND(
1168             pdev,
1169             pdev->hw_info.shmem_base +
1170                 OFFSETOF(shmem_region_t,
1171                     dev_info.port_feature_config.config),
1172             &val);
1173         if(val & PORT_FEATURE_RPHY_ENABLED)
1174         {
1175             pdev->params.enable_remote_phy = 1;
1176         }
1177     }
1178 
1179     if (CHIP_NUM(pdev) == CHIP_NUM_5706 ||
1180         CHIP_NUM(pdev) == CHIP_NUM_5708)
1181     {
1182         // Due to the slower speed of RV2P in Teton, we need to limit
1183         // the max number of BDs per end bit. Otherwise, Appscan in
1184         // RV2P would spend excessive time scanning for the end bit.
1185         pdev->params.limit_l4_rx_bd_cnt = 110;
1186     }
1187 
1188     /* Override the defaults with user configurations. */
1189     lm_status = mm_get_user_config(pdev);
1190     if(lm_status != LM_STATUS_SUCCESS)
1191     {
1192         return lm_status;
1193     }
1194 
1195     /* Make sure shared memory is initialized by the firmware.  If not,
1196      * fail initialization.  The check here is a little late as we
1197      * have already read some shared memory info above.  This is ok. */
1198     REG_RD_IND(
1199         pdev,
1200         pdev->hw_info.shmem_base +
1201             OFFSETOF(shmem_region_t, dev_info.signature),
1202         &val);
1203     if((val & DEV_INFO_SIGNATURE_MASK) != DEV_INFO_SIGNATURE)
1204     {
1205         if(!(pdev->params.test_mode & TEST_MODE_IGNORE_SHMEM_SIGNATURE))
1206         {
1207             DbgBreakMsg("Shmem signature not present.\n");
1208 
1209             return LM_STATUS_BAD_SIGNATURE;
1210         }
1211 
1212         pdev->hw_info.mac_addr[0] = 0x00;
1213         pdev->hw_info.mac_addr[1] = 0x10;
1214         pdev->hw_info.mac_addr[2] = 0x18;
1215         pdev->hw_info.mac_addr[3] = 0xff;
1216         pdev->hw_info.mac_addr[4] = 0xff;
1217         pdev->hw_info.mac_addr[5] = 0xff;
1218 
1219         pdev->hw_info.iscsi_mac_addr[0] = 0x00;
1220         pdev->hw_info.iscsi_mac_addr[1] = 0x10;
1221         pdev->hw_info.iscsi_mac_addr[2] = 0x18;
1222         pdev->hw_info.iscsi_mac_addr[3] = 0xff;
1223         pdev->hw_info.iscsi_mac_addr[4] = 0xff;
1224         pdev->hw_info.iscsi_mac_addr[5] = 0xfe;
1225     }
1226 
1227     /* Make sure the parameter values are within range. */
1228     param = param_list;
1229     while(param->offset)
1230     {
1231         if(param->min != 0 || param->max != 0)
1232         {
1233             if(PARAM_VAL(pdev, param) < param->min ||
1234                 PARAM_VAL(pdev, param) > param->max)
1235             {
1236                 if(CHIP_REV(pdev) == CHIP_REV_FPGA ||
1237                     CHIP_REV(pdev) == CHIP_REV_IKOS)
1238                 {
1239                     SET_PARAM_VAL(pdev, param, param->fpga_ikos_default);
1240                 }
1241                 else
1242                 {
1243                     SET_PARAM_VAL(pdev, param, param->asic_default);
1244                 }
1245             }
1246         }
1247 
1248         param++;
1249     }
1250 
1251     /* params.mtu read from the registry does not include the MAC header
1252      * size.  We need to add the header here. */
1253     /*
1254      * get_vbd_params does this already
1255      * pdev->params.mtu += ETHERNET_PACKET_HEADER_SIZE;
1256      */
1257 
1258     #ifndef EXCLUDE_KQE_SUPPORT
1259     /* The size of the kcq history.  This is the number of entries that
1260      * will not be overwritten by the chip. */
1261     if(pdev->params.kcq_history_size > (LM_PAGE_SIZE/sizeof(kcqe_t)) *
1262         pdev->params.kcq_page_cnt - 1)
1263     {
1264         pdev->params.kcq_history_size = ((LM_PAGE_SIZE/sizeof(kcqe_t)) *
1265             pdev->params.kcq_page_cnt) / 2;
1266     }
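
    /*
     * Worked example (assuming 4KB pages and a 16-byte kcqe_t): the
     * default kcq_page_cnt of 32 gives an 8192-entry ring, so any
     * history size above 8191 is clamped to half the ring, 4096.
     */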
1267     #endif
1268 
1269     /* XXX: Exception for Xinan, need a permanent fix. */
1270     if (CHIP_NUM(pdev) == CHIP_NUM_5709)
1271     {
1272         pdev->params.rcv_buffer_offset = 0;
1273     }
1274 
1275     /* Check for a valid mac address. */
1276     if((pdev->params.mac_addr[0] == 0 &&
1277         pdev->params.mac_addr[1] == 0 &&
1278         pdev->params.mac_addr[2] == 0 &&
1279         pdev->params.mac_addr[3] == 0 &&
1280         pdev->params.mac_addr[4] == 0 &&
1281         pdev->params.mac_addr[5] == 0) || (pdev->params.mac_addr[0] & 1))
1282     {
1283         DbgMessage(pdev, WARN, "invalid LAA.\n");
1284 
1285         pdev->params.mac_addr[0] = pdev->hw_info.mac_addr[0];
1286         pdev->params.mac_addr[1] = pdev->hw_info.mac_addr[1];
1287         pdev->params.mac_addr[2] = pdev->hw_info.mac_addr[2];
1288         pdev->params.mac_addr[3] = pdev->hw_info.mac_addr[3];
1289         pdev->params.mac_addr[4] = pdev->hw_info.mac_addr[4];
1290         pdev->params.mac_addr[5] = pdev->hw_info.mac_addr[5];
1291     }
1292 
1293     /* There is a bug in HC that will cause it to stop updating the
1294      * status block.  This has been seen on some systems with L4 traffic
1295      * flowing.  To work around this, the trip points and interrupt trip
1296      * points must be the same and the statistics DMA must be disabled. */
1297     if(CHIP_ID(pdev) == CHIP_ID_5706_A0)
1298     {
1299         pdev->params.tx_quick_cons_trip_int = pdev->params.tx_quick_cons_trip;
1300         pdev->params.tx_ticks_int = pdev->params.tx_ticks;
1301         pdev->params.rx_quick_cons_trip_int = pdev->params.rx_quick_cons_trip;
1302         pdev->params.rx_ticks_int = pdev->params.rx_ticks;
1303         pdev->params.comp_prod_trip_int = pdev->params.comp_prod_trip;
1304         pdev->params.com_ticks_int = pdev->params.com_ticks;
1305         pdev->params.cmd_ticks_int = pdev->params.cmd_ticks;
1306         pdev->params.stats_ticks = 0;
1307     }
1308 
1309     /* enable_syn_rcvq will direct all tcp segments with syn bit to rxq 1. */
1310     if(pdev->params.enable_syn_rcvq &&
1311         NUM_RX_CHAIN > 1 &&
1312         pdev->params.l2_rx_desc_cnt[1] == 0)
1313     {
1314         pdev->params.l2_rx_desc_cnt[1] = 60;
1315     }
1316 
1317     /* Timer mode is broken in 5706_A0 and 5706_A1. */
1318     if(CHIP_ID(pdev) == CHIP_ID_5706_A0 || CHIP_ID(pdev) == CHIP_ID_5706_A1)
1319     {
1320         pdev->params.hc_timer_mode = HC_COLLECT_MODE;
1321     }
1322 
1323     /* Get the current fw_wr_seq. */
1324     REG_RD_IND(
1325         pdev,
1326         pdev->hw_info.shmem_base + OFFSETOF(shmem_region_t, drv_fw_mb.fw_mb),
1327         &val);
1328     pdev->vars.fw_wr_seq = val & DRV_MSG_SEQ;
1329 
1330     /* see if firmware is remote phy capable. */
1331     if(pdev->params.enable_remote_phy)
1332     {
1333         REG_RD_IND(
1334             pdev,
1335             pdev->hw_info.shmem_base +
1336                 OFFSETOF(shmem_region_t, drv_fw_cap_mb.fw_cap_mb),
1337             &val);
1338         if((val & CAPABILITY_SIGNATURE_MASK) != FW_CAP_SIGNATURE ||
1339             (val & FW_CAP_REMOTE_PHY_CAPABLE) == 0)
1340         {
1341             pdev->params.enable_remote_phy = 0;
1342         }
1343     }
1344 
1345     return LM_STATUS_SUCCESS;
1346 } /* lm_get_dev_info */
1347 
1348 
1349 
1350 #ifndef EXCLUDE_KQE_SUPPORT
1351 /*******************************************************************************
1352  * Description: Allocate the kernel work queue and its page table.
1353  *
1354  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE on allocation failure.
1355  ******************************************************************************/
1356 STATIC lm_status_t
1357 init_kwq_resc(
1358     lm_device_t *pdev)
1359 {
1360     u32_t mem_size;
1361 
1362     if(pdev->params.kwq_page_cnt == 0)
1363     {
1364         return LM_STATUS_SUCCESS;
1365     }
1366 
1367     /* Allocate memory for the page table which does not need to be
1368      * page aligned.  However, the size must be a multiple of the page size.
1369      *
1370      * When initialized, the page table will point to the pages
1371      * used for the kernel work queue. */
1372     mem_size = pdev->params.kwq_page_cnt * sizeof(lm_address_t);
1373     mem_size = (mem_size + LM_PAGE_MASK) & ~LM_PAGE_MASK;
1374 
1375     pdev->kq_info.kwq_pgtbl_virt = mm_alloc_phys_mem(
1376         pdev,
1377         mem_size,
1378         &pdev->kq_info.kwq_pgtbl_phy,
1379         PHYS_MEM_TYPE_NONCACHED,
1380         NULL);
1381     if(pdev->kq_info.kwq_pgtbl_virt == NULL)
1382     {
1383         return LM_STATUS_RESOURCE;
1384     }
1385 
1386     DbgBreakIf(pdev->kq_info.kwq_pgtbl_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1387 
1388     /* Allocate memory for the kernel work queue.  Here we allocate
1389      * a physically contiguous block of memory and then initialize the
1390      * page table to point to the pages in this block.
1391      *
1392      * The kernel work queue is used by the driver similar to a
1393      * circular ring.
1394      *
1395      * The memory block must be page aligned. */
1396     mem_size = LM_PAGE_SIZE * pdev->params.kwq_page_cnt;
1397     pdev->kq_info.kwq_virt = (kwqe_t *) mm_alloc_phys_mem(
1398         pdev,
1399         mem_size,
1400         &pdev->kq_info.kwq_phy,
1401         PHYS_MEM_TYPE_NONCACHED,
1402         NULL);
1403     if(pdev->kq_info.kwq_virt == NULL)
1404     {
1405         return LM_STATUS_RESOURCE;
1406     }
1407 
1408     DbgBreakIf(pdev->kq_info.kwq_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1409     DbgBreakIf(((u8_t *) pdev->kq_info.kwq_virt - (u8_t *) 0) & LM_PAGE_MASK);
1410 
1411     return LM_STATUS_SUCCESS;
1412 } /* init_kwq_resc */
1413 
1414 
1415 
1416 /*******************************************************************************
1417  * Description: Allocate the kernel completion queue and its page table.
1418  *
1419  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE on allocation failure.
1420  ******************************************************************************/
1421 STATIC lm_status_t
1422 init_kcq_resc(
1423     lm_device_t *pdev)
1424 {
1425 
1426     u32_t mem_size;
1427 
1428     if(pdev->params.kcq_page_cnt == 0)
1429     {
1430         return LM_STATUS_SUCCESS;
1431     }
1432 
1433     /* Allocate memory for the page table which does not need to be
1434      * page aligned.  However, the size must be a multiple of the page size.
1435      *
1436      * When initialized, the page table will point to the pages
1437      * used for the kernel completion queue. */
1438     mem_size = pdev->params.kcq_page_cnt * sizeof(lm_address_t);
1439     mem_size = (mem_size + LM_PAGE_MASK) & ~LM_PAGE_MASK;
1440 
1441     pdev->kq_info.kcq_pgtbl_virt = mm_alloc_phys_mem(
1442         pdev,
1443         mem_size,
1444         &pdev->kq_info.kcq_pgtbl_phy,
1445         PHYS_MEM_TYPE_NONCACHED,
1446         NULL);
1447     if(pdev->kq_info.kcq_pgtbl_virt == NULL)
1448     {
1449         return LM_STATUS_RESOURCE;
1450     }
1451 
1452     DbgBreakIf(pdev->kq_info.kcq_pgtbl_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1453 
1454     /* Allocate memory for the kernel completion queue.  Here we allocate
1455      * a physically contiguous block of memory and then initialize the
1456      * page table to point to the pages in this block.
1457      *
1458      * The kernel completion queue is used by the driver similar to a
1459      * circular ring.
1460      *
1461      * The memory block must be page aligned. */
1462     mem_size = LM_PAGE_SIZE * pdev->params.kcq_page_cnt;
1463 
1464     pdev->kq_info.kcq_virt = (kcqe_t *) mm_alloc_phys_mem(
1465         pdev,
1466         mem_size,
1467         &pdev->kq_info.kcq_phy,
1468         PHYS_MEM_TYPE_NONCACHED,
1469         NULL);
1470     if(pdev->kq_info.kcq_virt == NULL)
1471     {
1472         return LM_STATUS_RESOURCE;
1473     }
1474 
1475     DbgBreakIf(pdev->kq_info.kcq_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1476     DbgBreakIf(((u8_t *) pdev->kq_info.kcq_virt - (u8_t *) 0) & LM_PAGE_MASK);
1477 
1478     return LM_STATUS_SUCCESS;
1479 } /* init_kcq_resc */
1480 #endif /* EXCLUDE_KQE_SUPPORT */
1481 
1482 
1483 
1484 #if INCLUDE_OFLD_SUPPORT
1485 /*******************************************************************************
1486  * Description: Initialize the offload state blocks and allocate the
1487  *    generic buffer chain, optional hcopy chain, and cid lookup table.
1488  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE on allocation failure.
1489  ******************************************************************************/
1490 STATIC lm_status_t
1491 init_ofld_resc(
1492     lm_device_t *pdev)
1493 {
1494     lm_offload_info_t *ofld;
1495     u32_t mem_size;
1496     u32_t idx;
1497 
1498     ofld = &pdev->ofld;
1499     ofld->pdev = pdev;
1500     ofld->pg_cid_hnd_info.max_pending_pg_oflds = 16;
1501     ofld->pg_cid_hnd_info.pending_pg_ofld_cnt = 0;
1502 
1503     s_list_init(&ofld->active_req_list, NULL, NULL, 0);
1504     s_list_init(&ofld->upload_req_list, NULL, NULL, 0);
1505 
1506     for(idx = 0; idx < STATE_BLOCK_CNT; idx++)
1507     {
1508         d_list_init(&ofld->state_blks[idx].tcp_list, NULL, NULL, 0);
1509         d_list_init(&ofld->state_blks[idx].path_list, NULL, NULL, 0);
1510         d_list_init(&ofld->state_blks[idx].neigh_list, NULL, NULL, 0);
1511 
1512         ofld->state_blks[idx].max_conn = 0xffffffff;
1513 
1514         ofld->state_blks[idx].state_block_idx = idx;
1515         ofld->state_blks[idx].ofld = ofld;
1516 
1517         ofld->state_blks[idx].params.ticks_per_second = 100;
1518         ofld->state_blks[idx].params.ack_frequency = 2;
1519         ofld->state_blks[idx].params.delayed_ack_ticks = 20;
1520         ofld->state_blks[idx].params.max_retx = 10;
1521         ofld->state_blks[idx].params.doubt_reachability_retx = 8;
1522         ofld->state_blks[idx].params.sws_prevention_ticks = 10;
1523         ofld->state_blks[idx].params.dup_ack_threshold = 3;
1524         ofld->state_blks[idx].params.push_ticks = 20;
1525         ofld->state_blks[idx].params.nce_stale_ticks = 20;
1526         ofld->state_blks[idx].params.starting_ip_id = 0x8000;
1527     }
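
    /*
     * Assuming the tick-based parameters above are in units of
     * 1/ticks_per_second, ticks_per_second = 100 makes each tick 10 ms,
     * i.e. a 200 ms delayed ACK, a 100 ms SWS-prevention timer, and
     * 200 ms push and NCE-stale timeouts by default.
     */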
1528 
1529     /* Allocate memory for the generic buffer chain. */
1530     mem_size = LM_PAGE_SIZE * pdev->params.gen_bd_page_cnt;
1531     ofld->gen_chain.bd_chain_virt = (rx_bd_t *) mm_alloc_phys_mem(
1532         pdev,
1533         mem_size,
1534         &ofld->gen_chain.bd_chain_phy,
1535         PHYS_MEM_TYPE_UNSPECIFIED,
1536         NULL);
1537     if(ofld->gen_chain.bd_chain_virt == NULL)
1538     {
1539         return LM_STATUS_RESOURCE;
1540     }
1541 
1542     DbgBreakIf(ofld->gen_chain.bd_chain_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1543 
1544     ofld->gen_chain.cid_addr = GET_CID_ADDR(GEN_CHAIN_CID);
1545 
1546     s_list_init(&ofld->gen_chain.block_list, NULL, NULL, 0);
1547     s_list_init(&ofld->gen_chain.free_gen_buf_list, NULL, NULL, 0);
1548     s_list_init(&ofld->gen_chain.active_gen_buf_list, NULL, NULL, 0);
1549 
1550     /* Allocate memory for the hcopy chain. */
1551     if(pdev->params.hcopy_desc_cnt)
1552     {
1553         mem_size = LM_PAGE_SIZE * pdev->params.hcopy_bd_page_cnt;
1554         ofld->hcopy_chain.bd_chain_virt = (tx_bd_t *) mm_alloc_phys_mem(
1555             pdev,
1556             mem_size,
1557             &ofld->hcopy_chain.bd_chain_phy,
1558             PHYS_MEM_TYPE_UNSPECIFIED,
1559             NULL);
1560         if(ofld->hcopy_chain.bd_chain_virt == NULL)
1561         {
1562             return LM_STATUS_RESOURCE;
1563         }
1564 
1565         DbgBreakIf(ofld->hcopy_chain.bd_chain_phy.as_u32.low &
1566             CACHE_LINE_SIZE_MASK);
1567 
1568         ofld->hcopy_chain.cid_addr = GET_CID_ADDR(HCOPY_CID);
1569         ofld->hcopy_chain.hw_con_idx_ptr =
1570             &pdev->vars.status_virt->deflt.status_rx_quick_consumer_index15;
1571 
1572         s_list_init(&ofld->hcopy_chain.pending_descq, NULL, NULL, 0);
1573         s_list_init(&ofld->hcopy_chain.active_descq, NULL, NULL, 0);
1574     }
1575 
1576     ofld->cid_to_state = (lm_state_header_t **) mm_alloc_mem(
1577         pdev,
1578         sizeof(lm_state_header_t *) * MAX_CID,
1579         NULL);
1580     if(ofld->cid_to_state == NULL)
1581     {
1582         return LM_STATUS_RESOURCE;
1583     }
1584 
1585     return LM_STATUS_SUCCESS;
1586 } /* init_ofld_resc */
1587 #endif /* INCLUDE_OFLD_SUPPORT */
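/*
 * Note on the timer parameters set in init_ofld_resc() above: assuming the
 * tick fields are in the units implied by ticks_per_second = 100, one tick
 * is 10 ms, so delayed_ack_ticks = 20 corresponds to a 200 ms delayed-ACK
 * timeout, sws_prevention_ticks = 10 to 100 ms, and push_ticks = 20 to
 * 200 ms.
 */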
1588 
1589 
1590 
1591 /*******************************************************************************
1592  * Description:
1593  *
1594  * Return:
1595  ******************************************************************************/
1596 STATIC volatile u16_t *
1597 sblk_tx_con_idx_ptr(
1598     lm_device_t *pdev,
1599     lm_tx_chain_t *txq)
1600 {
1601     volatile status_blk_combined_t *sblk;
1602     volatile u16_t *idx_ptr;
1603 
1604     sblk = pdev->vars.status_virt;
1605 
1606     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
1607     {
1608         switch(txq->idx)
1609         {
1610             case TX_CHAIN_IDX0:
1611                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index0;
1612                 break;
1613 
1614             case TX_CHAIN_IDX1:
1615                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index1;
1616                 break;
1617 
1618             case TX_CHAIN_IDX2:
1619                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index2;
1620                 break;
1621 
1622             case TX_CHAIN_IDX3:
1623                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index3;
1624                 break;
1625 
1626             default:
1627                 idx_ptr = NULL;
1628 
1629                 DbgBreakIf(txq->idx != pdev->tx_info.cu_idx);
1630 
1631                 if(txq->idx == pdev->tx_info.cu_idx)
1632                 {
1633                     idx_ptr = &sblk->deflt.status_rx_quick_consumer_index14;
1634                 }
1635                 break;
1636         }
1637     }
1638     else
1639     {
1640         switch(txq->idx)
1641         {
1642             case TX_CHAIN_IDX0:
1643                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index0;
1644                 break;
1645 
1646             case TX_CHAIN_IDX1:
1647                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index1;
1648                 break;
1649 
1650             case TX_CHAIN_IDX2:
1651                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index2;
1652                 break;
1653 
1654             case TX_CHAIN_IDX3:
1655                 idx_ptr = &sblk->deflt.status_tx_quick_consumer_index3;
1656                 break;
1657 
1658             case TX_CHAIN_IDX4:
1659                 idx_ptr = &sblk->proc[0].status_pcpu_tx_quick_consumer_index;
1660                 break;
1661 
1662             case TX_CHAIN_IDX5:
1663                 idx_ptr = &sblk->proc[1].status_pcpu_tx_quick_consumer_index;
1664                 break;
1665 
1666             case TX_CHAIN_IDX6:
1667                 idx_ptr = &sblk->proc[2].status_pcpu_tx_quick_consumer_index;
1668                 break;
1669 
1670             case TX_CHAIN_IDX7:
1671                 idx_ptr = &sblk->proc[3].status_pcpu_tx_quick_consumer_index;
1672                 break;
1673 
1674             case TX_CHAIN_IDX8:
1675                 idx_ptr = &sblk->proc[4].status_pcpu_tx_quick_consumer_index;
1676                 break;
1677 
1678             case TX_CHAIN_IDX9:
1679                 idx_ptr = &sblk->proc[5].status_pcpu_tx_quick_consumer_index;
1680                 break;
1681 
1682             case TX_CHAIN_IDX10:
1683                 idx_ptr = &sblk->proc[6].status_pcpu_tx_quick_consumer_index;
1684                 break;
1685 
1686             case TX_CHAIN_IDX11:
1687                 idx_ptr = &sblk->proc[7].status_pcpu_tx_quick_consumer_index;
1688                 break;
1689 
1690             default:
1691                 DbgBreakMsg("invalid xinan tx index.\n");
1692                 idx_ptr = NULL;
1693                 break;
1694         }
1695     }
1696 
1697     return idx_ptr;
1698 } /* sblk_tx_con_idx_ptr */
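/*
 * A minimal sketch of the per-processor half of the 5709 (Xinan) mapping in
 * sblk_tx_con_idx_ptr() above: chains 4..11 resolve to proc[0..7].  It
 * assumes the TX_CHAIN_IDX* values are the consecutive integers the switch
 * statement suggests; the switch form in the driver avoids that assumption.
 */
#if 0
static volatile u16_t *
xinan_tx_proc_idx_sketch(
    volatile status_blk_combined_t *sblk,
    u32_t chain_idx)
{
    if(chain_idx < TX_CHAIN_IDX4 || chain_idx > TX_CHAIN_IDX11)
    {
        return NULL;
    }

    return &sblk->proc[chain_idx - TX_CHAIN_IDX4].
        status_pcpu_tx_quick_consumer_index;
}
#endif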
1699 
1700 
1701 
1702 /*******************************************************************************
1703  * Description:
1704  *
1705  * Return:
1706  ******************************************************************************/
1707 STATIC lm_status_t
1708 init_l2tx_resc(
1709     lm_device_t *pdev)
1710 {
1711     lm_tx_chain_t *txq;
1712     u32_t bd_page_cnt;
1713     u32_t mem_size;
1714     u32_t idx;
1715     u32_t num_tx_chains;
1716 
1717 #if defined(LM_NON_LEGACY_MODE_SUPPORT)
1718     num_tx_chains = MAX_TX_CHAIN;
1719     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
1720     {
1721         num_tx_chains = pdev->tx_info.num_txq;
1722     }
1723 #else
1724     DbgBreakIf(pdev->tx_info.num_txq > MAX_TX_CHAIN);
1725     for(idx = pdev->tx_info.num_txq; idx < MAX_TX_CHAIN; idx++)
1726     {
1727         pdev->params.l2_tx_bd_page_cnt[idx] = 0;
1728     }
1729     num_tx_chains = pdev->tx_info.num_txq;
1730 #endif
1731     for(idx = 0; idx < num_tx_chains; idx++)
1732     {
1733         txq = &pdev->tx_info.chain[idx];
1734         txq->idx = idx;
1735         txq->cid_addr = GET_CID_ADDR(L2TX_CID_BASE + 2 * txq->idx);
1736 
1737         s_list_init(&txq->active_descq, NULL, NULL, 0);
1738 
1739         if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
1740         {
1741             DbgBreakIf(idx > 4);
1742 
1743             if(txq->idx == pdev->tx_info.cu_idx && txq->idx != TX_CHAIN_IDX1)
1744             {
1745                 DbgBreakIf(idx != 4);
1746                 txq->cid_addr = GET_CID_ADDR(30);
1747             }
1748         }
1749         else if(txq->idx >= 4)
1750         {
1751             DbgBreakIf(idx > 11);
1752 
1753             /* Xinan has to use tx1 for catchup because catchup2 uses
1754              * status_rx_quick_consumer_index14 for completion.  This
1755              * status block index is not available on Xinan. */
1756             DbgBreakIf(pdev->tx_info.cu_idx != TX_CHAIN_IDX1);
1757 
1758             if(txq->idx >= 4)
1759             {
1760                 txq->cid_addr = GET_CID_ADDR(L2TX_TSS_CID_BASE + txq->idx - 4);
1761             }
1762         }
1763 
1764         bd_page_cnt = pdev->params.l2_tx_bd_page_cnt[txq->idx];
1765         if(bd_page_cnt)
1766         {
1767             mem_size = LM_PAGE_SIZE * bd_page_cnt;
1768 
1769             txq->bd_chain_virt = (tx_bd_t *) mm_alloc_phys_mem(
1770                 pdev,
1771                 mem_size,
1772                 &txq->bd_chain_phy,
1773                 PHYS_MEM_TYPE_NONCACHED,
1774                 NULL);
1775             if(txq->bd_chain_virt == NULL)
1776             {
1777                 return LM_STATUS_RESOURCE;
1778             }
1779 
1780             DbgBreakIf(txq->bd_chain_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1781         }
1782 
1783         txq->hw_con_idx_ptr = sblk_tx_con_idx_ptr(pdev, txq);
1784     }
1785 
1786     return LM_STATUS_SUCCESS;
1787 } /* init_l2tx_resc */
1788 
1789 
1790 
1791 /*******************************************************************************
1792  * Description:
1793  *
1794  * Return:
1795  ******************************************************************************/
1796 STATIC volatile u16_t *
1797 sblk_rx_con_idx_ptr(
1798     lm_device_t *pdev,
1799     lm_rx_chain_t *rxq)
1800 {
1801     volatile status_blk_combined_t *sblk;
1802     volatile u16_t *idx_ptr;
1803 
1804     sblk = pdev->vars.status_virt;
1805 
1806     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
1807     {
1808         switch(rxq->idx)
1809         {
1810             case RX_CHAIN_IDX0:
1811                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index0;
1812                 break;
1813 
1814             case RX_CHAIN_IDX1:
1815                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index1;
1816                 break;
1817 
1818             case RX_CHAIN_IDX2:
1819                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index2;
1820                 break;
1821 
1822             case RX_CHAIN_IDX3:
1823                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index3;
1824                 break;
1825 
1826             case RX_CHAIN_IDX4:
1827                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index4;
1828                 break;
1829 
1830             case RX_CHAIN_IDX5:
1831                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index5;
1832                 break;
1833 
1834             case RX_CHAIN_IDX6:
1835                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index6;
1836                 break;
1837 
1838             case RX_CHAIN_IDX7:
1839                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index7;
1840                 break;
1841 
1842             case RX_CHAIN_IDX8:
1843                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index8;
1844                 break;
1845 
1846             case RX_CHAIN_IDX9:
1847                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index9;
1848                 break;
1849 
1850             case RX_CHAIN_IDX10:
1851                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index10;
1852                 break;
1853 
1854             case RX_CHAIN_IDX11:
1855                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index11;
1856                 break;
1857 
1858             case RX_CHAIN_IDX12:
1859                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index12;
1860                 break;
1861 
1862             case RX_CHAIN_IDX13:
1863                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index13;
1864                 break;
1865 
1866             case RX_CHAIN_IDX14:
1867                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index14;
1868                 break;
1869 
1870             case RX_CHAIN_IDX15:
1871                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index15;
1872                 break;
1873 
1874             default:
1875                 DbgBreakMsg("invalid teton rx index.\n");
1876                 idx_ptr = NULL;
1877                 break;
1878         }
1879     }
1880     else
1881     {
1882         switch(rxq->idx)
1883         {
1884             case RX_CHAIN_IDX0:
1885                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index0;
1886                 break;
1887 
1888             case RX_CHAIN_IDX1:
1889                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index1;
1890                 break;
1891 
1892             case RX_CHAIN_IDX2:
1893                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index2;
1894                 break;
1895 
1896             case RX_CHAIN_IDX3:
1897                 idx_ptr = &sblk->deflt.status_rx_quick_consumer_index3;
1898                 break;
1899 
1900             case RX_CHAIN_IDX4:
1901                 idx_ptr = &sblk->proc[0].status_pcpu_rx_quick_consumer_index;
1902                 break;
1903 
1904             case RX_CHAIN_IDX5:
1905                 idx_ptr = &sblk->proc[1].status_pcpu_rx_quick_consumer_index;
1906                 break;
1907 
1908             case RX_CHAIN_IDX6:
1909                 idx_ptr = &sblk->proc[2].status_pcpu_rx_quick_consumer_index;
1910                 break;
1911 
1912             case RX_CHAIN_IDX7:
1913                 idx_ptr = &sblk->proc[3].status_pcpu_rx_quick_consumer_index;
1914                 break;
1915 
1916             case RX_CHAIN_IDX8:
1917                 idx_ptr = &sblk->proc[4].status_pcpu_rx_quick_consumer_index;
1918                 break;
1919 
1920             case RX_CHAIN_IDX9:
1921                 idx_ptr = &sblk->proc[5].status_pcpu_rx_quick_consumer_index;
1922                 break;
1923 
1924             case RX_CHAIN_IDX10:
1925                 idx_ptr = &sblk->proc[6].status_pcpu_rx_quick_consumer_index;
1926                 break;
1927 
1928             case RX_CHAIN_IDX11:
1929                 idx_ptr = &sblk->proc[7].status_pcpu_rx_quick_consumer_index;
1930                 break;
1931 
1932             default:
1933                 DbgBreakMsg("invalid xinan rx index.\n");
1934                 idx_ptr = NULL;
1935                 break;
1936         }
1937     }
1938 
1939     return idx_ptr;
1940 } /* sblk_rx_con_idx_ptr */
1941 
1942 
1943 
1944 /*******************************************************************************
1945  * Description:
1946  *
1947  * Return:
1948  ******************************************************************************/
1949 STATIC lm_status_t
1950 alloc_l2rx_desc(
1951     lm_device_t *pdev,
1952     lm_rx_chain_t *rxq)
1953 {
1954     u32_t bd_page_cnt;
1955     lm_packet_t *pkt;
1956     u32_t desc_size;
1957     u32_t desc_cnt;
1958     u8_t *mem_virt;
1959     u32_t mem_size;
1960     u32_t idx;
1961 
1962     bd_page_cnt = pdev->params.l2_rx_bd_page_cnt[rxq->idx];
1963     desc_cnt = pdev->params.l2_rx_desc_cnt[rxq->idx];
1964 
1965     if(bd_page_cnt == 0 || desc_cnt == 0)
1966     {
1967         pdev->params.l2_rx_bd_page_cnt[rxq->idx] = 0;
1968         pdev->params.l2_rx_desc_cnt[rxq->idx] = 0;
1969 
1970         return LM_STATUS_SUCCESS;
1971     }
1972 
1973     mem_size = LM_PAGE_SIZE * bd_page_cnt;
1974 
1975     rxq->bd_chain_virt = (rx_bd_t *) mm_alloc_phys_mem(
1976         pdev,
1977         mem_size,
1978         &rxq->bd_chain_phy,
1979         PHYS_MEM_TYPE_NONCACHED,
1980         NULL);
1981     if(rxq->bd_chain_virt == NULL)
1982     {
1983         return LM_STATUS_RESOURCE;
1984     }
1985 
1986     DbgBreakIf(rxq->bd_chain_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1987 
1988 #ifndef LM_NON_LEGACY_MODE_SUPPORT
1989     desc_size = mm_desc_size(pdev, DESC_TYPE_L2RX_PACKET) + SIZEOF_SIG;
1990     mem_size = desc_size * desc_cnt;
1991 
1992     mem_virt = (u8_t *) mm_alloc_mem(pdev, mem_size, NULL);
1993     if(mem_virt == NULL)
1994     {
1995         return LM_STATUS_RESOURCE;
1996     }
1997 
1998     for(idx = 0; idx < desc_cnt; idx++)
1999     {
2000         pkt = (lm_packet_t *) (mem_virt + SIZEOF_SIG);
2001         mem_virt += desc_size;
2002         mem_size -= desc_size;
2003 
2004         SIG(pkt) = L2PACKET_RX_SIG;
2005         // full packet needs to hold mtu + 4-byte CRC32
2006         pkt->u1.rx.buf_size = pdev->params.mtu + 4;
2007         pkt->u1.rx.buf_size += L2RX_FRAME_HDR_LEN;
2008         pkt->u1.rx.buf_size += pdev->params.rcv_buffer_offset;
2009         pkt->u1.rx.buf_size += CACHE_LINE_SIZE_MASK + 1;
2010         pkt->u1.rx.buf_size &= ~CACHE_LINE_SIZE_MASK;
2011 
2012         s_list_push_tail(&rxq->free_descq, &pkt->link);
2013     }
2014 
2015     DbgBreakIf(mem_size);
2016     DbgBreakIf(s_list_entry_cnt(&rxq->free_descq) != desc_cnt);
2017 #endif
2018     return LM_STATUS_SUCCESS;
2019 } /* alloc_l2rx_desc */
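/*
 * Worked example of the receive-buffer sizing in alloc_l2rx_desc() above,
 * assuming (for illustration) mtu = 1500, L2RX_FRAME_HDR_LEN = 14,
 * rcv_buffer_offset = 0 and a 64-byte cache line (CACHE_LINE_SIZE_MASK =
 * 0x3f):
 *
 *     1500 + 4 (CRC32) + 14 + 0 = 1518
 *     (1518 + 0x3f) & ~0x3f     = 1536
 *
 * i.e. each buffer is rounded up to the next cache-line multiple.
 */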
2020 
2021 
2022 
2023 /*******************************************************************************
2024  * Description:
2025  *
2026  * Return:
2027  ******************************************************************************/
2028 STATIC lm_status_t
2029 init_l2rx_resc(
2030     lm_device_t *pdev)
2031 {
2032     lm_status_t lm_status;
2033     lm_rx_chain_t *rxq;
2034     u32_t idx;
2035 
2036 #ifndef LM_NON_LEGACY_MODE_SUPPORT
2037     DbgBreakIf(pdev->rx_info.num_rxq > MAX_RX_CHAIN);
2038 
2039     for(idx = pdev->rx_info.num_rxq; idx < MAX_RX_CHAIN; idx++)
2040     {
2041         pdev->params.l2_rx_desc_cnt[idx] = 0;
2042         pdev->params.l2_rx_bd_page_cnt[idx] = 0;
2043     }
2044 #endif
2045     for(idx = 0; idx < pdev->rx_info.num_rxq; idx++)
2046     {
2047         rxq = &pdev->rx_info.chain[idx];
2048         rxq->idx = idx;
2049         rxq->cid_addr = GET_CID_ADDR(L2RX_CID_BASE + rxq->idx);
2050 
2051         s_list_init(&rxq->free_descq, NULL, NULL, 0);
2052         s_list_init(&rxq->active_descq, NULL, NULL, 0);
2053 
2054         lm_status = alloc_l2rx_desc(pdev, rxq);
2055         if(lm_status != LM_STATUS_SUCCESS)
2056         {
2057             return lm_status;
2058         }
2059 
2060         rxq->hw_con_idx_ptr = sblk_rx_con_idx_ptr(pdev, rxq);
2061     }
2062 
2063     return LM_STATUS_SUCCESS;
2064 } /* init_l2rx_resc */
2065 
2066 
2067 
2068 /*******************************************************************************
2069  * Description:
2070  *
2071  * Return:
2072  ******************************************************************************/
2073 STATIC lm_status_t
2074 init_context_resc_5709(
2075     lm_device_t *pdev)
2076 {
2077     phy_mem_block_t *ctx_mem;
2078     u32_t page_align_delta;
2079     lm_address_t mem_phy;
2080     u32_t ctx_in_mblk;
2081     u32_t mem_size;
2082     u8_t *mem_virt;
2083     u32_t ctx_cnt;
2084 
2085     DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
2086     DbgBreakIf(CTX_MBLK_SIZE & LM_PAGE_MASK);
2087     DbgBreakIf(MAX_CTX > 16 * 1024);
2088     DbgBreakIf(MAX_CTX * ONE_CTX_SIZE / CTX_MBLK_SIZE != NUM_CTX_MBLKS);
2089     DbgBreakIf((MAX_CTX * ONE_CTX_SIZE) % CTX_MBLK_SIZE);
2090 
2091     ctx_mem = &pdev->vars.ctx_mem[0];
2092     ctx_cnt = 0;
2093 
2094     while(ctx_cnt < MAX_CTX)
2095     {
2096         ctx_in_mblk = CTX_MBLK_SIZE / ONE_CTX_SIZE;
2097         if(ctx_cnt + ctx_in_mblk > MAX_CTX)
2098         {
2099             ctx_in_mblk = MAX_CTX - ctx_cnt;
2100         }
2101 
2102         mem_size = ctx_in_mblk * ONE_CTX_SIZE;
2103 
2104         mem_virt = (u8_t *) mm_alloc_phys_mem(
2105             pdev,
2106             mem_size + LM_PAGE_MASK,
2107             &mem_phy,
2108             PHYS_MEM_TYPE_NONCACHED,
2109             NULL);
2110         if(mem_virt == NULL)
2111         {
2112             return LM_STATUS_RESOURCE;
2113         }
2114 
2115         page_align_delta = mem_phy.as_u32.low & LM_PAGE_MASK;
2116         if(page_align_delta)
2117         {
2118             page_align_delta = LM_PAGE_SIZE - page_align_delta;
2119         }
2120 
2121         mem_virt += page_align_delta;
2122         LM_INC64(&mem_phy, page_align_delta);
2123 
2124         ctx_mem->start_phy = mem_phy;
2125         ctx_mem->start = mem_virt;
2126         ctx_mem->size = mem_size;
2127         ctx_mem++;
2128 
2129         ctx_cnt += mem_size / ONE_CTX_SIZE;
2130     }
2131 
2132     return LM_STATUS_SUCCESS;
2133 } /* init_context_resc_5709 */
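/*
 * Worked example of the page-alignment fixup in init_context_resc_5709()
 * above, assuming LM_PAGE_SIZE = 0x1000 (LM_PAGE_MASK = 0xfff) and an
 * allocation that comes back at physical address 0x12345678:
 *
 *     mem_phy.low & 0xfff  = 0x678
 *     0x1000 - 0x678       = 0x988   (page_align_delta)
 *     0x12345678 + 0x988   = 0x12346000
 *
 * The extra LM_PAGE_MASK bytes requested from mm_alloc_phys_mem() are what
 * guarantee the block still covers mem_size after this shift.
 */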
2134 
2135 
2136 
2137 /*******************************************************************************
2138  * Description:
2139  *
2140  * Return:
2141  ******************************************************************************/
2142 lm_status_t
2143 lm_init_resc(
2144     lm_device_t *pdev)
2145 {
2146     lm_status_t lm_status;
2147     lm_address_t mem_phy;
2148     u8_t *mem_virt;
2149     u32_t mem_size;
2150 
2151     #ifndef EXCLUDE_KQE_SUPPORT
2152     lm_status = init_kwq_resc(pdev);
2153     if(lm_status != LM_STATUS_SUCCESS)
2154     {
2155         return lm_status;
2156     }
2157 
2158     lm_status = init_kcq_resc(pdev);
2159     if(lm_status != LM_STATUS_SUCCESS)
2160     {
2161         return lm_status;
2162     }
2163     #endif
2164 
2165     #if INCLUDE_OFLD_SUPPORT
2166     lm_status = init_ofld_resc(pdev);
2167     if(lm_status != LM_STATUS_SUCCESS)
2168     {
2169         return lm_status;
2170     }
2171     #endif
2172 
2173     DbgBreakIf(sizeof(status_blk_combined_t) > STATUS_BLOCK_BUFFER_SIZE);
2174     DbgBreakIf(sizeof(statistics_block_t) > CHIP_STATS_BUFFER_SIZE);
2175 
2176     mem_size = STATUS_BLOCK_BUFFER_SIZE +
2177     #ifndef EXCLUDE_RSS_SUPPORT
2178         RSS_INDIRECTION_TABLE_SIZE +
2179         RSS_LOOKUP_TABLE_WA +
2180     #endif
2181         CHIP_STATS_BUFFER_SIZE;
2182 
2183     mem_virt = mm_alloc_phys_mem(
2184         pdev,
2185         mem_size,
2186         &mem_phy,
2187         PHYS_MEM_TYPE_NONCACHED,
2188         NULL);
2189     if(mem_virt == NULL)
2190     {
2191         return LM_STATUS_RESOURCE;
2192     }
2193 
2194     DbgBreakIf(mem_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
2195 
2196     pdev->vars.status_virt = (status_blk_combined_t *) mem_virt;
2197     pdev->vars.status_phy = mem_phy;
2198     mem_virt += STATUS_BLOCK_BUFFER_SIZE;
2199     LM_INC64(&mem_phy, STATUS_BLOCK_BUFFER_SIZE);
2200 
2201     pdev->vars.stats_virt = (statistics_block_t *) mem_virt;
2202     pdev->vars.stats_phy = mem_phy;
2203     mem_virt += CHIP_STATS_BUFFER_SIZE;
2204     LM_INC64(&mem_phy, CHIP_STATS_BUFFER_SIZE);
2205 
2206     #ifndef EXCLUDE_RSS_SUPPORT
2207     pdev->rx_info.rss_ind_table_virt = mem_virt;
2208     pdev->rx_info.rss_ind_table_phy = mem_phy;
2209     #endif
2210 
2211     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
2212     {
2213         lm_status = init_context_resc_5709(pdev);
2214         if(lm_status != LM_STATUS_SUCCESS)
2215         {
2216             return lm_status;
2217         }
2218     }
2219 
2220     lm_status = init_l2tx_resc(pdev);
2221     if(lm_status != LM_STATUS_SUCCESS)
2222     {
2223         return lm_status;
2224     }
2225 
2226     lm_status = init_l2rx_resc(pdev);
2227     if(lm_status != LM_STATUS_SUCCESS)
2228     {
2229         return lm_status;
2230     }
2231 
2232     lm_clear_nwuf(pdev);
2233 
2234     return LM_STATUS_SUCCESS;
2235 } /* lm_init_resc */
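/*
 * Memory layout note for lm_init_resc() above: the single noncached
 * allocation is carved into consecutive regions,
 *
 *     [ status block | chip statistics | RSS indirection table ]
 *
 * with mem_virt and mem_phy advanced past each region in lockstep via
 * pointer arithmetic and LM_INC64().  The RSS region exists only when
 * EXCLUDE_RSS_SUPPORT is not defined.
 */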
2236 
2237 
2238 
2239 /*******************************************************************************
2240  * Description:
2241  *
2242  * Return:
2243  ******************************************************************************/
2244 STATIC u32_t
2245 compute_crc32(
2246     u8_t *buf,
2247     u32_t buf_size)
2248 {
2249     u32_t reg;
2250     u32_t tmp;
2251     u32_t j;
2252     u32_t k;
2253 
2254     reg = 0xffffffff;
2255 
2256     for(j = 0; j < buf_size; j++)
2257     {
2258         reg ^= buf[j];
2259 
2260         for(k = 0; k < 8; k++)
2261         {
2262             tmp = reg & 0x01;
2263 
2264             reg >>= 1;
2265 
2266             if(tmp)
2267             {
2268                 reg ^= 0xedb88320;
2269             }
2270         }
2271     }
2272 
2273     return ~reg;
2274 } /* compute_crc32 */
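/*
 * A minimal usage sketch for compute_crc32() above; this is the standard
 * reflected CRC-32 (polynomial 0xedb88320), which set_mc_hash_reg() below
 * applies to 6-byte Ethernet addresses.  The address value here is purely
 * illustrative.
 */
#if 0
{
    u8_t mac[ETHERNET_ADDRESS_SIZE] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    u32_t crc;

    crc = compute_crc32(mac, ETHERNET_ADDRESS_SIZE);
}
#endif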
2275 
2276 
2277 
2278 #define NUM_MC_HASH_REGISTERS                   8
2279 /*******************************************************************************
2280  * Description:
2281  *
2282  * Return:
2283  ******************************************************************************/
2284 STATIC void
2285 set_mc_hash_reg(
2286     lm_device_t *pdev,
2287     lm_mc_table_t *mc_table)
2288 {
2289     u32_t hash_reg[NUM_MC_HASH_REGISTERS];
2290     u32_t reg_idx;
2291     u32_t bit_pos;
2292     u32_t idx;
2293     u32_t crc32;
2294 
2295     /* Program the MC hash registers.
2296      *    The MAC hash registers are used to help discard unwanted
2297      *    multicast packets as they are received from the external
2298      *    media.  The destination address is fed into the normal CRC
2299      *    algorithm in order to generate a hash function.  The most
2300      *    significant bits of the CRC are then used without any inversion
2301      *    in reverse order to index into a hash table which is comprised
2302      *    of these MAC hash registers.  If the CRC is calculated by
2303      *    shifting right then the rightmost bits of the CRC can be
2304      *    directly used with no additional inversion or bit swapping
2305      *    required.  All eight MAC hash registers are used such that
2306      *    register 1 bit-31 is the most significant hash table entry
2307      *    and register 8 bit-0 is the least significant hash table entry.
2308      *    This follows the normal big-endian ordering used throughout
2309      *    Teton.  Since there are 256 hash table entries, 8-bits are
2310      *    used from the CRC.  The hash registers are ignored if the
2311      *    receive MAC is in promiscuous mode. */
2312     for(idx = 0; idx < NUM_MC_HASH_REGISTERS; idx++)
2313     {
2314         hash_reg[idx] = 0;
2315     }
2316 
2317     for(idx = 0; idx < mc_table->entry_cnt; idx++)
2318     {
2319         crc32 = compute_crc32(
2320             mc_table->addr_arr[idx].mc_addr,
2321             ETHERNET_ADDRESS_SIZE);
2322 
2323         /* The most significant 8 bits of the CRC32 (no inversion)
2324          * are used to index into one of the possible 256 bit positions. */
2325         bit_pos = ~crc32 & 0xff;
2326 
2327         reg_idx = (bit_pos & 0xe0) >> 5;
2328 
2329         bit_pos &= 0x1f;
2330 
2331         hash_reg[reg_idx] |= (1 << bit_pos);
2332     }
2333 
2334     for(idx = 0; idx < NUM_MC_HASH_REGISTERS; idx++)
2335     {
2336         REG_WR(pdev, emac.emac_multicast_hash[idx], hash_reg[idx]);
2337     }
2338 } /* set_mc_hash_reg */
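/*
 * Worked example of the hash indexing in set_mc_hash_reg() above: if the
 * uninverted CRC yields bit_pos = ~crc32 & 0xff = 0xa7, then
 *
 *     reg_idx = (0xa7 & 0xe0) >> 5 = 5
 *     bit_pos =  0xa7 & 0x1f       = 0x07
 *
 * so the entry sets bit 7 of hash_reg[5].  The 0xa7 value is illustrative.
 */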
2339 
2340 
2341 
2342 /*******************************************************************************
2343  * Description:
2344  *
2345  * Return:
2346  ******************************************************************************/
2347 lm_status_t
2348 lm_set_rx_mask(
2349     lm_device_t *pdev,
2350     u32_t user_idx,
2351     lm_rx_mask_t rx_mask)
2352 {
2353     u32_t combined_rx_mask;
2354     u32_t invalid_rx_mask;
2355     u32_t sort_mode;
2356     u32_t rx_mode;
2357     u32_t val;
2358     u32_t idx;
2359 
2360     if(user_idx >= MAX_RX_FILTER_USER_CNT)
2361     {
2362         DbgBreakMsg("invalid user index.\n");
2363 
2364         return LM_STATUS_FAILURE;
2365     }
2366 
2367     combined_rx_mask = rx_mask;
2368     for(idx = 0; idx < MAX_RX_FILTER_USER_CNT; idx++)
2369     {
2370         if(idx != user_idx)
2371         {
2372             combined_rx_mask |= pdev->rx_info.mask[idx];
2373         }
2374     }
2375 
2376     /* Set up the rx_mode register. */
2377     invalid_rx_mask = combined_rx_mask;
2378     REG_RD(pdev, emac.emac_rx_mode, &rx_mode);
2379 
2380     if(invalid_rx_mask & LM_RX_MASK_ACCEPT_UNICAST)
2381     {
2382         invalid_rx_mask &= ~LM_RX_MASK_ACCEPT_UNICAST;
2383     }
2384 
2385     if(invalid_rx_mask & LM_RX_MASK_ACCEPT_MULTICAST)
2386     {
2387         invalid_rx_mask &= ~LM_RX_MASK_ACCEPT_MULTICAST;
2388     }
2389 
2390     if(invalid_rx_mask & LM_RX_MASK_ACCEPT_ALL_MULTICAST)
2391     {
2392         invalid_rx_mask &= ~LM_RX_MASK_ACCEPT_ALL_MULTICAST;
2393     }
2394 
2395     rx_mode &= ~EMAC_RX_MODE_FILT_BROADCAST;
2396     if(invalid_rx_mask & LM_RX_MASK_ACCEPT_BROADCAST)
2397     {
2398         invalid_rx_mask &= ~LM_RX_MASK_ACCEPT_BROADCAST;
2399     }
2400     else
2401     {
2402         rx_mode |= EMAC_RX_MODE_FILT_BROADCAST;
2403     }
2404 
2405     rx_mode &= ~(EMAC_RX_MODE_ACCEPT_RUNTS | EMAC_RX_MODE_ACCEPT_OVERSIZE);
2406     if(invalid_rx_mask & LM_RX_MASK_ACCEPT_ERROR_PACKET)
2407     {
2408         invalid_rx_mask &= ~LM_RX_MASK_ACCEPT_ERROR_PACKET;
2409         rx_mode |= EMAC_RX_MODE_ACCEPT_RUNTS |
2410             EMAC_RX_MODE_ACCEPT_OVERSIZE |
2411             EMAC_RX_MODE_NO_CRC_CHK;
2412     }
2413 
2414     rx_mode &= ~EMAC_RX_MODE_PROMISCUOUS;
2415     if(invalid_rx_mask & LM_RX_MASK_PROMISCUOUS_MODE)
2416     {
2417         invalid_rx_mask &= ~LM_RX_MASK_PROMISCUOUS_MODE;
2418         rx_mode |= EMAC_RX_MODE_PROMISCUOUS;
2419     }
2420 
2421     if(invalid_rx_mask)
2422     {
2423         DbgBreakMsg("Unknown rx_mask.\n");
2424 
2425         return LM_STATUS_FAILURE;
2426     }
2427 
2428     if(combined_rx_mask & LM_RX_MASK_ACCEPT_ALL_MULTICAST)
2429     {
2430         for(idx = 0; idx < NUM_MC_HASH_REGISTERS; idx++)
2431         {
2432             REG_WR(pdev, emac.emac_multicast_hash[idx], 0xffffffff);
2433         }
2434     }
2435     else if(combined_rx_mask & LM_RX_MASK_ACCEPT_MULTICAST)
2436     {
2437         set_mc_hash_reg(pdev, &pdev->mc_table);
2438     }
2439     else
2440     {
2441         for(idx = 0; idx < NUM_MC_HASH_REGISTERS; idx++)
2442         {
2443             REG_WR(pdev, emac.emac_multicast_hash[idx], 0);
2444         }
2445     }
2446 
2447     pdev->rx_info.mask[user_idx] = rx_mask;
2448 
2449     val = rx_mode | EMAC_RX_MODE_SORT_MODE;
2450     if(pdev->params.keep_vlan_tag)
2451     {
2452         val |= EMAC_RX_MODE_KEEP_VLAN_TAG;
2453     }
2454     REG_WR(pdev, emac.emac_rx_mode, val);
2455 
2456     /* Set up the sort_mode register. */
2457     sort_mode = 0;
2458 
2459     if(rx_mask & LM_RX_MASK_ACCEPT_UNICAST)
2460     {
2461         sort_mode |= 1 << user_idx;
2462     }
2463 
2464     if(rx_mask & LM_RX_MASK_ACCEPT_MULTICAST)
2465     {
2466         sort_mode |= RPM_SORT_USER0_MC_HSH_EN;
2467     }
2468 
2469     if(rx_mask & LM_RX_MASK_ACCEPT_ALL_MULTICAST)
2470     {
2471         sort_mode |= RPM_SORT_USER0_MC_EN;
2472     }
2473 
2474     if(rx_mask & LM_RX_MASK_ACCEPT_BROADCAST)
2475     {
2476         sort_mode |= RPM_SORT_USER0_BC_EN;
2477     }
2478 
2479     if(rx_mask & LM_RX_MASK_PROMISCUOUS_MODE)
2480     {
2481         sort_mode |= RPM_SORT_USER0_PROM_EN | RPM_SORT_USER0_PROM_VLAN;
2482     }
2483 
2484     switch(user_idx)
2485     {
2486         case RX_FILTER_USER_IDX0:
2487             REG_RD(pdev, rpm.rpm_sort_user0, &val);
2488 
2489             REG_WR(pdev, rpm.rpm_sort_user0, 0x00000000);
2490             REG_WR(pdev, rpm.rpm_sort_user0, sort_mode);
2491 
2492             val &= 0xffff;
2493             val &= ~(1 << user_idx);
2494 
2495             sort_mode |= val | RPM_SORT_USER0_ENA;
2496             REG_WR(pdev, rpm.rpm_sort_user0, sort_mode);
2497             break;
2498 
2499         case RX_FILTER_USER_IDX1:
2500             REG_RD(pdev, rpm.rpm_sort_user1, &val);
2501 
2502             REG_WR(pdev, rpm.rpm_sort_user1, 0x00000000);
2503             REG_WR(pdev, rpm.rpm_sort_user1, sort_mode);
2504 
2505             val &= 0xffff;
2506             val &= ~(1 << user_idx);
2507 
2508             sort_mode |= val | RPM_SORT_USER0_ENA;
2509             REG_WR(pdev, rpm.rpm_sort_user1, sort_mode);
2510             break;
2511 
2512         case RX_FILTER_USER_IDX2:
2513             REG_RD(pdev, rpm.rpm_sort_user2, &val);
2514 
2515             REG_WR(pdev, rpm.rpm_sort_user2, 0x00000000);
2516             REG_WR(pdev, rpm.rpm_sort_user2, sort_mode);
2517 
2518             val &= 0xffff;
2519             val &= ~(1 << user_idx);
2520 
2521             sort_mode |= val | RPM_SORT_USER0_ENA;
2522             REG_WR(pdev, rpm.rpm_sort_user2, sort_mode);
2523             break;
2524 
2525         case RX_FILTER_USER_IDX3:
2526             REG_RD(pdev, rpm.rpm_sort_user3, &val);
2527 
2528             REG_WR(pdev, rpm.rpm_sort_user3, 0x00000000);
2529             REG_WR(pdev, rpm.rpm_sort_user3, sort_mode);
2530 
2531             val &= 0xffff;
2532             val &= ~(1 << user_idx);
2533 
2534             sort_mode |= val | RPM_SORT_USER0_ENA;
2535             REG_WR(pdev, rpm.rpm_sort_user3, sort_mode);
2536             break;
2537 
2538         default:
2539             DbgBreakMsg("invalid user idx.\n");
2540 
2541             break;
2542     }
2543 
2544     /* Set rx_flood for L2. */
2545     REG_RD_IND(pdev, 0xe0024, &val);
2546     val &= ~(1 << user_idx);
2547 
2548     if(rx_mask & (LM_RX_MASK_ACCEPT_MULTICAST |
2549                   LM_RX_MASK_ACCEPT_ALL_MULTICAST |
2550                   LM_RX_MASK_ACCEPT_BROADCAST |
2551                   LM_RX_MASK_PROMISCUOUS_MODE))
2552     {
2553         val |= (1 << user_idx);
2554     }
2555 
2556     REG_WR_IND(pdev, 0xe0024, val);
2557 
2558     return LM_STATUS_SUCCESS;
2559 } /* lm_set_rx_mask */
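/*
 * A minimal usage sketch for lm_set_rx_mask() above, given a pdev: enable
 * unicast and broadcast receive for filter user 0.  The mask combination is
 * illustrative; callers normally OR new bits into the mask they already
 * own, as lm_add_mc() below does.
 */
#if 0
{
    lm_status_t status;

    status = lm_set_rx_mask(
        pdev,
        RX_FILTER_USER_IDX0,
        LM_RX_MASK_ACCEPT_UNICAST | LM_RX_MASK_ACCEPT_BROADCAST);
}
#endif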
2560 
2561 
2562 
2563 /*******************************************************************************
2564  * Description:
2565  *
2566  * Return:
2567  ******************************************************************************/
2568 lm_status_t
2569 lm_add_mc(
2570     lm_device_t *pdev,
2571     u8_t *mc_addr)
2572 {
2573     lm_mc_entry_t *mc_entry;
2574     u32_t cnt;
2575 
2576     DbgMessage(pdev, VERBOSE, "### lm_add_mc\n");
2577 
2578     for(cnt = 0; cnt < pdev->mc_table.entry_cnt; cnt++)
2579     {
2580         mc_entry = &pdev->mc_table.addr_arr[cnt];
2581 
2582         if(IS_ETH_ADDRESS_EQUAL(mc_entry->mc_addr, mc_addr))
2583         {
2584             mc_entry->ref_cnt++;
2585 
2586             return LM_STATUS_SUCCESS;
2587         }
2588     }
2589 
2590     if(pdev->mc_table.entry_cnt >= LM_MAX_MC_TABLE_SIZE)
2591     {
2592         DbgBreakMsg("No entry in MC table\n");
2593 
2594         return LM_STATUS_FAILURE;
2595     }
2596 
2597     mc_entry = &pdev->mc_table.addr_arr[pdev->mc_table.entry_cnt];
2598     pdev->mc_table.entry_cnt++;
2599 
2600     mc_entry->ref_cnt = 1;
2601 
2602     COPY_ETH_ADDRESS(mc_addr, mc_entry->mc_addr);
2603 
2604     (void) lm_set_rx_mask(
2605         pdev,
2606         RX_FILTER_USER_IDX0,
2607         pdev->rx_info.mask[RX_FILTER_USER_IDX0] | LM_RX_MASK_ACCEPT_MULTICAST);
2608 
2609     return LM_STATUS_SUCCESS;
2610 } /* lm_add_mc */
2611 
2612 
2613 
2614 /*******************************************************************************
2615  * Description:
2616  *
2617  * Return:
2618  ******************************************************************************/
2619 lm_status_t
2620 lm_del_mc(
2621     lm_device_t *pdev,
2622     u8_t *mc_addr)
2623 {
2624     lm_mc_entry_t *mc_entry;
2625     u32_t cnt;
2626 
2627     for(cnt = 0; cnt < pdev->mc_table.entry_cnt; cnt++)
2628     {
2629         mc_entry = &pdev->mc_table.addr_arr[cnt];
2630 
2631         if(IS_ETH_ADDRESS_EQUAL(mc_entry->mc_addr, mc_addr))
2632         {
2633             mc_entry->ref_cnt--;
2634 
2635             /* No more instances left; remove the address from the table.
2636              * Move the last entry in the table to the deleted slot. */
2637             if(mc_entry->ref_cnt == 0)
2638             {
2639                 if(pdev->mc_table.entry_cnt > 1)
2640                 {
2641                     *mc_entry = pdev->mc_table.addr_arr[pdev->mc_table.entry_cnt-1];
2642                 }
2643 
2644                 pdev->mc_table.entry_cnt--;
2645 
2646                 /* Update the receive mask if the table is empty. */
2647                 if(pdev->mc_table.entry_cnt == 0)
2648                 {
2649                     pdev->rx_info.mask[RX_FILTER_USER_IDX0] &=
2650                             ~LM_RX_MASK_ACCEPT_MULTICAST;
2651                 }
2652 
2653                 (void) lm_set_rx_mask(
2654                         pdev,
2655                         RX_FILTER_USER_IDX0,
2656                         pdev->rx_info.mask[RX_FILTER_USER_IDX0]);
2657             }
2658 
2659             return LM_STATUS_SUCCESS;
2660         }
2661     }
2662 
2663     DbgBreakMsg("Mc address not in the table\n");
2664 
2665     return LM_STATUS_FAILURE;
2666 } /* lm_del_mc */
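/*
 * The multicast table is reference counted, so lm_add_mc() and lm_del_mc()
 * above must be called in matching pairs per address.  A minimal sketch
 * with an illustrative address:
 */
#if 0
{
    u8_t mc[ETHERNET_ADDRESS_SIZE] = { 0x01, 0x00, 0x5e, 0x11, 0x22, 0x33 };

    (void) lm_add_mc(pdev, mc);     /* ref_cnt becomes 1 */
    (void) lm_add_mc(pdev, mc);     /* same address, ref_cnt becomes 2 */

    (void) lm_del_mc(pdev, mc);     /* ref_cnt back to 1, entry stays */
    (void) lm_del_mc(pdev, mc);     /* ref_cnt 0, entry removed */
}
#endif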
2667 
2668 
2669 
2670 /*******************************************************************************
2671  * Description:
2672  *
2673  * Return:
2674  ******************************************************************************/
2675 void
2676 lm_clear_mc(lm_device_t *pdev)
2677 {
2678     DbgMessage(pdev, VERBOSE, "### lm_clear_mc\n");
2679 
2680     pdev->mc_table.entry_cnt = 0;
2681 
2682     (void) lm_set_rx_mask(
2683         pdev,
2684         RX_FILTER_USER_IDX0,
2685         pdev->rx_info.mask[RX_FILTER_USER_IDX0] & ~LM_RX_MASK_ACCEPT_MULTICAST);
2686 } /* lm_clear_mc */
2687 
2688 
2689 
2690 /*******************************************************************************
2691  * Description:
2692  *
2693  * Return:
2694  ******************************************************************************/
2695 lm_status_t
2696 lm_get_stats(
2697     lm_device_t *pdev,
2698     lm_stats_t stats_type,
2699     u64_t *stats_cnt)
2700 {
2701     volatile statistics_block_t *sb;
2702     lm_status_t lm_status;
2703     lm_u64_t *stats;
2704     u32_t reg_val;
2705     u32_t val;
2706 
2707     //
2708     // The fix for CQ#29454 caused CQ#30307 -
2709     // Bacs: bogus counters on 5708 under the statistics tab.
2710     // So far, Windows has never seen the CQ#29454 problem,
2711     // so the fix is removed for now.
2712     //
2713 
2714     /* CQ#29454 - statistics corruption. */
2715     //REG_RD(pdev, hc.hc_stats_ticks, &val);
2716     //REG_WR(pdev, hc.hc_stats_ticks, 0);
2717 
2718     REG_WR(pdev, hc.hc_command, HC_COMMAND_STATS_NOW);
2719     REG_RD(pdev, hc.hc_command, &reg_val);
2720     mm_wait(pdev, 5);
2721 
2722     lm_status = LM_STATUS_SUCCESS;
2723     sb = pdev->vars.stats_virt;
2724     stats = (lm_u64_t *) stats_cnt;
2725 
2726     switch(stats_type)
2727     {
2728         case LM_STATS_FRAMES_XMITTED_OK:
2729             stats->as_u32.low = sb->stat_IfHCOutUcastPkts_lo;
2730             stats->as_u32.high = sb->stat_IfHCOutUcastPkts_hi;
2731 
2732             LM_INC64(stats, sb->stat_IfHCOutMulticastPkts_lo);
2733             stats->as_u32.high += sb->stat_IfHCOutMulticastPkts_hi;
2734 
2735             LM_INC64(stats, sb->stat_IfHCOutBroadcastPkts_lo);
2736             stats->as_u32.high += sb->stat_IfHCOutBroadcastPkts_hi;
2737             break;
2738 
2739         case LM_STATS_FRAMES_RECEIVED_OK:
2740             stats->as_u32.low = sb->stat_IfHCInUcastPkts_lo;
2741             stats->as_u32.high = sb->stat_IfHCInUcastPkts_hi;
2742 
2743             LM_INC64(stats, sb->stat_IfHCInMulticastPkts_lo);
2744             stats->as_u32.high += sb->stat_IfHCInMulticastPkts_hi;
2745 
2746             LM_INC64(stats, sb->stat_IfHCInBroadcastPkts_lo);
2747             stats->as_u32.high += sb->stat_IfHCInBroadcastPkts_hi;
2748             REG_RD_IND(
2749                 pdev,
2750                 OFFSETOF(reg_space_t,
2751                          com.com_scratch[0])+
2752                          COM_HSI_OFFSETOFF(com_l2_iscsi_no_buffer),
2753                          &val);
2754             if((stats->as_u32.high == 0 && stats->as_u32.low) &&
2755                (stats->as_u32.low < val))
2756             {
2757                 /* Due to the asynchronous nature of reading the counters
2758                  * from the status block and reading the counters from
2759                  * chip scratchpad memory, it is possible that the values
2760                  * are out of sync. */
2761                 stats->as_u32.low = 0;
2762             }
2763             else
2764             {
2765                 LM_DEC64(stats, val);
2766             }
2767 
2768             REG_RD_IND(
2769                 pdev,
2770                 OFFSETOF(reg_space_t,
2771                          com.com_scratch[0])+
2772                          COM_HSI_OFFSETOFF(com_l2_no_buffer),
2773                          &val);
2774             if((stats->as_u32.high == 0 && stats->as_u32.low) &&
2775                (stats->as_u32.low < val))
2776             {
2777                 /* Due to the asynchronous nature of reading the counters
2778                  * from the status block and reading the counters from
2779                  * chip scratchpad memory, it is possible that the values
2780                  * are out of sync. */
2781                 stats->as_u32.low = 0;
2782             }
2783             else
2784             {
2785                 LM_DEC64(stats, val);
2786             }
2787             break;
2788 
2789         case LM_STATS_ERRORED_RECEIVE_CNT:
2790             stats->as_u32.low = pdev->rx_info.stats.err;
2791             stats->as_u32.high = 0;
2792             break;
2793 
2794         case LM_STATS_RCV_CRC_ERROR:
2795             stats->as_u32.low = sb->stat_Dot3StatsFCSErrors;
2796             stats->as_u32.high = 0;
2797             break;
2798 
2799         case LM_STATS_ALIGNMENT_ERROR:
2800             stats->as_u32.low = sb->stat_Dot3StatsAlignmentErrors;
2801             stats->as_u32.high = 0;
2802             break;
2803 
2804         case LM_STATS_SINGLE_COLLISION_FRAMES:
2805             stats->as_u32.low = sb->stat_Dot3StatsSingleCollisionFrames;
2806             stats->as_u32.high = 0;
2807             break;
2808 
2809         case LM_STATS_MULTIPLE_COLLISION_FRAMES:
2810             stats->as_u32.low = sb->stat_Dot3StatsMultipleCollisionFrames;
2811             stats->as_u32.high = 0;
2812             break;
2813 
2814         case LM_STATS_FRAMES_DEFERRED:
2815             stats->as_u32.low = sb->stat_Dot3StatsDeferredTransmissions;
2816             stats->as_u32.high = 0;
2817             break;
2818 
2819         case LM_STATS_MAX_COLLISIONS:
2820             stats->as_u32.low = sb->stat_Dot3StatsExcessiveCollisions;
                 stats->as_u32.high = 0;
2821             break;
2822 
2823         case LM_STATS_UNICAST_FRAMES_XMIT:
2824             stats->as_u32.low = sb->stat_IfHCOutUcastPkts_lo;
2825             stats->as_u32.high = sb->stat_IfHCOutUcastPkts_hi;
2826             break;
2827 
2828         case LM_STATS_MULTICAST_FRAMES_XMIT:
2829             stats->as_u32.low = sb->stat_IfHCOutMulticastPkts_lo;
2830             stats->as_u32.high = sb->stat_IfHCOutMulticastPkts_hi;
2831             break;
2832 
2833         case LM_STATS_BROADCAST_FRAMES_XMIT:
2834             stats->as_u32.low = sb->stat_IfHCOutBroadcastPkts_lo;
2835             stats->as_u32.high = sb->stat_IfHCOutBroadcastPkts_hi;
2836             break;
2837 
2838         case LM_STATS_UNICAST_FRAMES_RCV:
2839             stats->as_u32.low = sb->stat_IfHCInUcastPkts_lo;
2840             stats->as_u32.high = sb->stat_IfHCInUcastPkts_hi;
2841             REG_RD_IND(
2842                 pdev,
2843                 OFFSETOF(reg_space_t,
2844                          com.com_scratch[0])+
2845                          COM_HSI_OFFSETOFF(com_unicast_no_buffer),
2846                          &val);
2847             if((stats->as_u32.high == 0 && stats->as_u32.low) &&
2848                (stats->as_u32.low < val))
2849             {
2850                 /* Due to the asynchronous nature of reading the counters
2851                  * from the status block and reading the counters from
2852                  * chip scratchpad memory, it is possible that the values
2853                  * are out of sync. */
2854                 stats->as_u32.low = 0;
2855             }
2856             else
2857             {
2858                 LM_DEC64(stats, val);
2859             }
2860             break;
2861 
2862         case LM_STATS_MULTICAST_FRAMES_RCV:
2863             stats->as_u32.low = sb->stat_IfHCInMulticastPkts_lo;
2864             stats->as_u32.high = sb->stat_IfHCInMulticastPkts_hi;
2865             REG_RD_IND(
2866                 pdev,
2867                 OFFSETOF(reg_space_t,
2868                          com.com_scratch[0])+
2869                          COM_HSI_OFFSETOFF(com_mcast_no_buffer),
2870                          &val);
2871 
2872             if((stats->as_u32.high == 0 && stats->as_u32.low) &&
2873                (stats->as_u32.low < val))
2874             {
2875                 /* Due to the asynchronous nature of reading the counters
2876                  * from the status block and reading the counters from
2877                  * chip scratchpad memory, it is possible that the values
2878                  * are out of sync. */
2879                 stats->as_u32.low = 0;
2880             }
2881             else
2882             {
2883                 LM_DEC64(stats, val);
2884             }
2885             break;
2886 
2887         case LM_STATS_BROADCAST_FRAMES_RCV:
2888             stats->as_u32.low = sb->stat_IfHCInBroadcastPkts_lo;
2889             stats->as_u32.high = sb->stat_IfHCInBroadcastPkts_hi;
2890             REG_RD_IND(
2891                 pdev,
2892                 OFFSETOF(reg_space_t,
2893                          com.com_scratch[0])+
2894                          COM_HSI_OFFSETOFF(com_bcast_no_buffer),
2895                          &val);
2896             if((stats->as_u32.high == 0 && stats->as_u32.low) &&
2897                (stats->as_u32.low < val))
2898             {
2899                 /* Due to the asynchronous nature of reading the counters
2900                  * from the status block and reading the counters from
2901                  * chip scratchpad memory, it is possible that the values
2902                  * are out of sync. */
2903                 stats->as_u32.low = 0;
2904             }
2905             else
2906             {
2907                 LM_DEC64(stats, val);
2908             }
2909             break;
2910 
2911         case LM_STATS_ERRORED_TRANSMIT_CNT:
2912         case LM_STATS_RCV_OVERRUN:
2913         case LM_STATS_XMIT_UNDERRUN:
2914             /* These counters are always zero. */
2915             stats->as_u32.low = 0;
2916             stats->as_u32.high = 0;
2917             break;
2918 
2919         case LM_STATS_RCV_NO_BUFFER_DROP:
2920             /* com_no_buffer */
2921             REG_RD_IND(
2922                 pdev,
2923                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_unicast_no_buffer),
2924                 &val);
2925             stats->as_u32.low = val;
2926             REG_RD_IND(
2927                 pdev,
2928                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_mcast_no_buffer),
2929                 &val);
2930             stats->as_u32.low += val;
2931             REG_RD_IND(
2932                 pdev,
2933                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_bcast_no_buffer),
2934                 &val);
2935             stats->as_u32.low += val;
2936 
2937             stats->as_u32.high = 0;
2938             break;
2939 
2940         case LM_STATS_BYTES_RCV:
2941             stats->as_u32.low = sb->stat_IfHCInOctets_lo;
2942             stats->as_u32.high = sb->stat_IfHCInOctets_hi;
2943             break;
2944 
2945         case LM_STATS_BYTES_XMIT:
2946             stats->as_u32.low = sb->stat_IfHCOutOctets_lo;
2947             stats->as_u32.high = sb->stat_IfHCOutOctets_hi;
2948             break;
2949 
2950         case LM_STATS_IF_IN_DISCARDS:
2951             /* com_no_buffer */
2952             REG_RD_IND(
2953                 pdev,
2954                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_unicast_no_buffer),
2955                 &val);
2956             stats->as_u32.low = val;
2957             REG_RD_IND(
2958                 pdev,
2959                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_mcast_no_buffer),
2960                 &val);
2961             stats->as_u32.low += val;
2962             REG_RD_IND(
2963                 pdev,
2964                 OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_bcast_no_buffer),
2965                 &val);
2966             stats->as_u32.low += val;
2967             stats->as_u32.low += sb->stat_Dot3StatsFCSErrors;
2968 
2969             stats->as_u32.high = 0;
2970             break;
2971 
2972         case LM_STATS_XMIT_DISCARDS:
2973         case LM_STATS_IF_IN_ERRORS:
2974         case LM_STATS_IF_OUT_ERRORS:
2975             stats->as_u32.low = 0;
2976             stats->as_u32.high = 0;
2977             break;
2978 
2979         case LM_STATS_DIRECTED_BYTES_RCV:
2980             /* rxp_unicast_bytes_rcvd */
2981             REG_RD_IND(
2982                 pdev,
2983                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_unicast_bytes_rcvd)+4,
2984                 &stats->as_u32.low);
2985             REG_RD_IND(
2986                 pdev,
2987                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_unicast_bytes_rcvd),
2988                 &stats->as_u32.high);
2989             break;
2990 
2991         case LM_STATS_MULTICAST_BYTES_RCV:
2992             /* rxp_multicast_bytes_rcvd */
2993             REG_RD_IND(
2994                 pdev,
2995                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_multicast_bytes_rcvd)+4,
2996                 &stats->as_u32.low);
2997             REG_RD_IND(
2998                 pdev,
2999                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_multicast_bytes_rcvd),
3000                 &stats->as_u32.high);
3001             break;
3002 
3003         case LM_STATS_BROADCAST_BYTES_RCV:
3004             /* rxp_broadcast_bytes_rcvd */
3005             REG_RD_IND(
3006                 pdev,
3007                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_broadcast_bytes_rcvd)+4,
3008                 &stats->as_u32.low);
3009             REG_RD_IND(
3010                 pdev,
3011                 OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+RXP_HSI_OFFSETOFF(rxp_broadcast_bytes_rcvd),
3012                 &stats->as_u32.high);
3013             break;
3014 
3015         case LM_STATS_DIRECTED_BYTES_XMIT:
3016             /* unicast_bytes_xmit_lo */
3017             REG_RD_IND(
3018                 pdev,
3019                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(unicast_bytes_xmit)+4,
3020                 &stats->as_u32.low);
3021             REG_RD_IND(
3022                 pdev,
3023                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(unicast_bytes_xmit),
3024                 &stats->as_u32.high);
3025             break;
3026 
3027         case LM_STATS_MULTICAST_BYTES_XMIT:
3028             /* multicast_bytes_xmit */
3029             REG_RD_IND(
3030                 pdev,
3031                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(multicast_bytes_xmit)+4,
3032                 &stats->as_u32.low);
3033             REG_RD_IND(
3034                 pdev,
3035                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(multicast_bytes_xmit),
3036                 &stats->as_u32.high);
3037             break;
3038 
3039         case LM_STATS_BROADCAST_BYTES_XMIT:
3040             /* broadcast_bytes_xmit */
3041             REG_RD_IND(
3042                 pdev,
3043                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(broadcast_bytes_xmit)+4,
3044                 &stats->as_u32.low);
3045             REG_RD_IND(
3046                 pdev,
3047                 OFFSETOF(reg_space_t, tpat.tpat_scratch[0])+TPAT_HSI_OFFSETOFF(broadcast_bytes_xmit),
3048                 &stats->as_u32.high);
3049             break;
3050 
3051         default:
3052             stats->as_u32.low = 0;
3053             stats->as_u32.high = 0;
3054 
3055             lm_status = LM_STATUS_INVALID_PARAMETER;
3056             break;
3057     }
3058 
3059     //REG_WR(pdev, hc.hc_stats_ticks, val);
3060 
3061     return lm_status;
3062 } /* lm_get_stats */
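/*
 * The no-buffer adjustment repeated throughout lm_get_stats() above follows
 * one pattern: subtract a 32-bit drop count from a 64-bit frame count, but
 * clamp to zero when the two asynchronous reads are out of sync.  A minimal
 * helper capturing that pattern (a sketch, not in the driver):
 */
#if 0
static void
stats_sub_clamped_sketch(
    lm_u64_t *stats,
    u32_t val)
{
    if((stats->as_u32.high == 0 && stats->as_u32.low) &&
       (stats->as_u32.low < val))
    {
        stats->as_u32.low = 0;
    }
    else
    {
        LM_DEC64(stats, val);
    }
}
#endif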
3063 
3064 
3065 
3066 /*******************************************************************************
3067  * Description:
3068  *
3069  * Return:
3070  ******************************************************************************/
3071 STATIC lm_nwuf_t *
3072 find_nwuf(
3073     lm_nwuf_list_t *nwuf_list,
3074     u32_t mask_size,
3075     u8_t *byte_mask,
3076     u8_t *pattern,
3077     u32_t max_nwuf_cnt)
3078 {
3079     lm_nwuf_t *nwuf;
3080     u8_t found;
3081     u32_t idx;
3082     u32_t j;
3083     u32_t k;
3084 
3085     for(idx = 0; idx < max_nwuf_cnt; idx++)
3086     {
3087         nwuf = &nwuf_list->nwuf_arr[idx];
3088 
3089         if((nwuf->size&0xffff) != mask_size)
3090         {
3091             continue;
3092         }
3093 
3094         found = TRUE;
3095         for(j = 0; j < mask_size && found == TRUE; j++)
3096         {
3097             if(nwuf->mask[j] != byte_mask[j])
3098             {
3099                 found = FALSE;
3100                 break;
3101             }
3102 
3103             for(k = 0; k < 8; k++)
3104             {
3105                 if((byte_mask[j] & (1 << k)) &&
3106                     (nwuf->pattern[j*8 + k] != pattern[j*8 + k]))
3107                 {
3108                     found = FALSE;
3109                     break;
3110                 }
3111             }
3112         }
3113 
3114         if(found)
3115         {
3116             return nwuf;
3117         }
3118     }
3119 
3120     return NULL;
3121 } /* find_nwuf */
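/*
 * Note on the mask layout used by find_nwuf() above: bit k of mask byte j
 * enables comparison of pattern byte j*8 + k.  For example, byte_mask[2] =
 * 0x05 (bits 0 and 2 set) means pattern bytes 16 and 18 participate in the
 * match while bytes 17 and 19..23 of that group are ignored.
 */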
3122 
3123 
3124 
3125 /*******************************************************************************
3126  * Description:
3127  *
3128  * Return:
3129  ******************************************************************************/
3130 lm_status_t
3131 lm_add_nwuf(
3132     lm_device_t *pdev,
3133     u32_t pattern_size,
3134     u32_t mask_size,
3135     u8_t *byte_mask,
3136     u8_t *pattern)
3137 {
3138     lm_nwuf_t *nwuf;
3139     u32_t idx;
3140 /*
3141     u32_t i;
3142 */
3143     u32_t j;
3144     u32_t k;
3145     u32_t l;
3146     u32_t combind_size;
3147     u32_t max_nwuf_cnt;
3148 
3149     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
3150     {
3151         max_nwuf_cnt = LM_MAX_NWUF_CNT_5709;
3152     }
3153     else
3154     {
3155         max_nwuf_cnt = LM_MAX_NWUF_CNT;
3156     }
3157 
3158     combind_size = (pattern_size<<16) & 0xffff0000;
3159     combind_size |= mask_size;
3160     pattern_size &= 0xffff;
3161     mask_size &= 0xffff;
3162 
3163 
3164     //DbgBreakIf(mask_size == 0xc && pattern_size == 0x4a);
3165 
3166 
3167     if(mask_size == 0 || mask_size > LM_NWUF_PATTERN_MASK_SIZE)
3168     {
3169         DbgBreakMsg("Invalid byte mask size\n");
3170 
3171         return LM_STATUS_FAILURE;
3172     }
3173 
3174     /* If this is a duplicate entry, we are done. */
3175     nwuf = find_nwuf(
3176             &pdev->nwuf_list,
3177             mask_size,
3178             byte_mask, pattern,
3179             max_nwuf_cnt);
3180 
3181     if(nwuf)
3182     {
3183         DbgMessage(pdev, INFORM, "Duplicated nwuf entry.\n");
3184 
3185         return LM_STATUS_EXISTING_OBJECT;
3186     }
3187 
3188     /* Find an empty slot. */
3189     nwuf = NULL;
3190     for(idx = 0; idx < max_nwuf_cnt; idx++)
3191     {
3192         if(pdev->nwuf_list.nwuf_arr[idx].size == 0)
3193         {
3194             nwuf = &pdev->nwuf_list.nwuf_arr[idx];
3195             break;
3196         }
3197     }
3198 
3199     /*
3200      * LHDBG_PRINT(("%p Adding NWUF[%d], mask size: %d, pattern size: %d\n",
3201                 pdev,idx,mask_size,pattern_size));
3202     LHDBG_PRINT(("mask array:\n"));
3203 
3204     for (i=0;i<mask_size;i++)
3205     {
3206         if (0 == i%16) LH_PRINTK(("\n"));
3207         LH_PRINTK(("%02x ", byte_mask[i]));
3208     }
3209     LH_PRINTK(("\npattern:\n"));
3210 
3211     for (i=0;i<mask_size;i++)
3212     {
3213         for (j=0;j<8;j++)
3214         {
3215             if (0 == (i*8+j)%16)
3216             {
3217                 LH_PRINTK(("\n"));
3218             }
3219             if (byte_mask[i] & 1<<j)
3220             {
3221                 LH_PRINTK(("[%02x] ",pattern[i*8+j]));
3222             }
3223             else
3224             {
3225                 if (pattern_size && i*8+j>=pattern_size)
3226                 {
3227                     LH_PRINTK(("-%02x- ",pattern[i*8+j]));
3228                 }
3229                 else
3230                 {
3231                     LH_PRINTK((" %02x  ",pattern[i*8+j]));
3232                 }
3233 
3234             }
3235         }
3236     }
3237     LH_PRINTK(("\n"));
3238 */
3239 
3240     if(nwuf == NULL)
3241     {
3242         DbgMessage(pdev, WARN, "Cannot add Nwuf, exceeded maximum.\n");
3243 
3244         return LM_STATUS_RESOURCE;
3245     }
3246 
3247     pdev->nwuf_list.cnt++;
3248 
3249     /* Save nwuf data. */
3250     nwuf->size = mask_size;
3251 
3252     if (pattern_size)
3253     {
3254         nwuf->size = combind_size;
3255         goto _handle_win7_pattern;
3256     }
3257 
3258     for(j = 0; j < mask_size; j++)
3259     {
3260         nwuf->mask[j] = byte_mask[j];
3261 
3262         for(k = 0; k < 8; k++)
3263         {
3264             if(byte_mask[j] & (1 << k))
3265             {
3266                 nwuf->pattern[j*8 + k] = pattern[j*8 + k];
3267             }
3268             else
3269             {
3270                 nwuf->pattern[j*8 + k] = 0;
3271             }
3272         }
3273     }
3274 
3275     /* The byte patterns immediately following the last byte that is enabled
3276      * for comparison need to be set to 0xff.  This helps facilitate
3277      * programming the pattern onto the chip.  The end of the pattern is
3278      * indicated by the first 0xff byte that is not enabled for comparison. */
3279     if(byte_mask[mask_size-1])
3280     {
3281         k = 8;
3282         while(k)
3283         {
3284             k--;
3285             if(byte_mask[mask_size-1] & (1 << k))
3286             {
3287                 break;
3288             }
3289 
3290             nwuf->pattern[(mask_size-1)*8 + k] = 0xff;
3291         }
3292     }
3293 
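
    /* Example: if the last mask byte is 0x07, pattern bytes 0..2 of the
     * final 8-byte group are enabled for comparison, and the loop above
     * sets bytes 3..7 of that group to 0xff to mark the end of the
     * pattern. */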
3294     /* Set the rest of the pattern to 0xff. */
3295     /* Zero the rest of the mask and set the rest of the pattern to 0xff. */
3296     {
3297         nwuf->mask[j] = 0;
3298 
3299         for(k = 0; k < 8; k++)
3300         {
3301             nwuf->pattern[j*8 + k] = 0xff;
3302         }
3303     }
3304 /*
3305     LHDBG_PRINT(("Dumping pattern before return\n"));
3306     for (i=0;i<128;i++)
3307     {
3308         if (i!=0 && i%16==0)
3309         {
3310             LH_PRINTK(("\n"));
3311         }
3312 
3313         LH_PRINTK(("%02x ",nwuf->pattern[i]));
3314 
3315     }
3316     LH_PRINTK(("\nEnd of add_nwuf\n"));
3317 */
3318     return LM_STATUS_SUCCESS;
3319 _handle_win7_pattern:
3320     /* This path is new for Win7: pattern_size gives the pattern length
3321      * explicitly rather than deriving it from mask_size. */
3322 
3323     l = 0;
3324 
3325     /* For the lxdiag build. */
3326 #ifdef LINUX
3327     {
3328         u8_t idx;
3329         for(idx = 0; idx < LM_NWUF_PATTERN_MASK_SIZE; idx++)
3330             nwuf->mask[idx] = 0;
3331     }
3332 #else
3333     memset(nwuf->mask,0,LM_NWUF_PATTERN_MASK_SIZE);
3334 #endif
3335 
3336     for(j = 0; j < mask_size ; j++)
3337     {
3338         nwuf->mask[j] = byte_mask[j];
3339 
3340         for(k = 0; k < 8 ; k++)
3341         {
3342             if(l < pattern_size)
3343             {
3344                 if(byte_mask[j] & (1 << k))
3345                 {
3346                     nwuf->pattern[j*8 + k] = pattern[j*8 + k];
3347                 }
3348                 else
3349                 {
3350                     nwuf->pattern[j*8 + k] = 0;
3351                 }
3352             }
3353             else
3354             {
3355                 nwuf->pattern[j*8 + k] = 0xff;
3356             }
3357             l++;
3358         }
3359     }
3360 /*
3361     LHDBG_PRINT(("Dumping pattern before return\n"));
3362     for (i=0;i<128;i++)
3363     {
3364         if (i!=0 && i%16==0)
3365         {
3366             LH_PRINTK(("\n"));
3367         }
3368 
3369         LH_PRINTK(("%02x ",nwuf->pattern[i]));
3370 
3371     }
3372     LH_PRINTK(("\nEnd of add_nwuf\n"));
3373 */
3374     return LM_STATUS_SUCCESS;
3375 } /* lm_add_nwuf */
3376 
3377 
3378 /*******************************************************************************
3379  * Description:
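 *    Remove the NWUF entry matching the given byte mask and pattern and
 *    clear its slot for reuse.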
3380  *
3381  * Return:
3382  ******************************************************************************/
3383 lm_status_t
3384 lm_del_nwuf(
3385     lm_device_t *pdev,
3386     u32_t mask_size,
3387     u8_t *byte_mask,
3388     u8_t *pattern)
3389 {
3390     lm_nwuf_t *nwuf;
3391     u32_t k;
3392     u32_t max_nwuf_cnt;
3393 
3394     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
3395     {
3396         max_nwuf_cnt = LM_MAX_NWUF_CNT_5709;
3397     }
3398     else
3399     {
3400         max_nwuf_cnt = LM_MAX_NWUF_CNT;
3401     }
3402 
3403     mask_size &= 0xffff;
3404     if(mask_size == 0 || mask_size > LM_NWUF_PATTERN_MASK_SIZE)
3405     {
3406         DbgBreakMsg("Invalid byte mask size\n");
3407 
3408         return LM_STATUS_FAILURE;
3409     }
3410 
3411     /* Look for a matching pattern. */
3412     nwuf = find_nwuf(
3413             &pdev->nwuf_list,
3414             mask_size,
3415             byte_mask,
3416             pattern,
3417             max_nwuf_cnt);
3418 
3419     if(nwuf == NULL)
3420     {
3421         return LM_STATUS_OBJECT_NOT_FOUND;
3422     }
3423 
3424     nwuf->size = 0;
3425 
3426     for(k = 0; k < LM_NWUF_PATTERN_MASK_SIZE; k++)
3427     {
3428         nwuf->mask[k] = 0;
3429     }
3430 
3431     for(k = 0; k < LM_NWUF_PATTERN_SIZE; k++)
3432     {
3433         nwuf->pattern[k] = 0xff;
3434     }
3435 
3436     pdev->nwuf_list.cnt--;
3437 
3438     return LM_STATUS_SUCCESS;
3439 } /* lm_del_nwuf */
3440 
3441 
3442 
3443 /*******************************************************************************
3444  * Description:
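 *    Remove all NWUF entries from the device's pattern list.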
3445  *
3446  * Return:
3447  ******************************************************************************/
3448 void
3449 lm_clear_nwuf(
3450     lm_device_t *pdev)
3451 {
3452     u32_t j;
3453     u32_t k;
3454     u32_t max_nwuf_cnt;
3455 
3456     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
3457     {
3458         max_nwuf_cnt = LM_MAX_NWUF_CNT_5709;
3459     }
3460     else
3461     {
3462         max_nwuf_cnt = LM_MAX_NWUF_CNT;
3463     }
3464 
3465     for(j = 0; j < max_nwuf_cnt; j++)
3466     {
3467         pdev->nwuf_list.nwuf_arr[j].size = 0;
3468 
3469         for(k = 0; k < LM_NWUF_PATTERN_MASK_SIZE; k++)
3470         {
3471             pdev->nwuf_list.nwuf_arr[j].mask[k] = 0;
3472         }
3473 
3474         for(k = 0; k < LM_NWUF_PATTERN_SIZE; k++)
3475         {
3476             pdev->nwuf_list.nwuf_arr[j].pattern[k] = 0xff;
3477         }
3478     }
3479 
3480     pdev->nwuf_list.cnt = 0;
3481 } /* lm_clear_nwuf */
3482 
3483 
3484 
3485 /*******************************************************************************
3486  * Description:
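 *    Program the NWUF byte enables and pattern CRCs into the 5709 RPM
 *    ACPI registers.  Returns the number of patterns programmed.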
3487  *
3488  * Return:
3489  ******************************************************************************/
3490 STATIC u32_t
3491 init_nwuf_5709(
3492     lm_device_t *pdev,
3493     lm_nwuf_list_t *nwuf_list)
3494 {
3495     lm_nwuf_t *nwuf;
3496     u16_t prev_val;
3497     u32_t nwuf_len;
3498     u32_t nwuf_cnt;
3499     u32_t offset;
3500     u8_t mask;
3501     u32_t val;
3502     u32_t idx;
3503     u8_t bit;
3504     u16_t pattern_size;
3505     u32_t nwuf_size[LM_MAX_NWUF_CNT_5709];
3506 
3507     DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5709);
3508     DbgBreakIf(LM_NWUF_PATTERN_SIZE > 128);
3509     DbgBreakIf(LM_MAX_NWUF_CNT_5709 > 8);
3510 
3511     REG_WR(pdev, rpm.rpm_acpi_byte_enable_ctrl, RPM_ACPI_BYTE_ENABLE_CTRL_INIT);
3512 
3513     for(idx = 0; idx < LM_MAX_NWUF_CNT_5709; idx++)
3514     {
3515         nwuf = &nwuf_list->nwuf_arr[idx];
3516         nwuf_size[idx] = nwuf->size;
3517     }
3518     for(idx = 0; idx < 1000; idx++)
3519     {
3520         mm_wait(pdev, 5);
3521 
3522         REG_RD(pdev, rpm.rpm_acpi_byte_enable_ctrl, &val);
3523         if((val & RPM_ACPI_BYTE_ENABLE_CTRL_INIT) == 0)
3524         {
3525             break;
3526         }
3527     }
3528     DbgBreakIf(val & RPM_ACPI_BYTE_ENABLE_CTRL_INIT);
3529 
3530     val = 0;
3531     for(idx = 0; idx < 4; idx++)
3532     {
3533         nwuf = &nwuf_list->nwuf_arr[idx];
3534         pattern_size = nwuf->size >> 16;
3535         nwuf->size &= 0xffff;
3536 
3537         DbgBreakIf(nwuf->size > LM_NWUF_PATTERN_MASK_SIZE);
3538 
3539         if(nwuf->size == 0)
3540         {
3541             continue;
3542         }
3543         if (pattern_size)
3544         {
3545             val |= (pattern_size) << ((3 - idx) * 8);
3546         }
3547         else
3548         {
3549             val |= (nwuf->size * 8) << ((3 - idx) * 8);
3550         }
3551     }
3552     REG_WR(pdev, rpm.rpm_acpi_pattern_len0, val);
3553 
3554     val = 0;
3555     for(idx = 4; idx < LM_MAX_NWUF_CNT_5709; idx++)
3556     {
3557         nwuf = &nwuf_list->nwuf_arr[idx];
3558         pattern_size = nwuf->size >> 16;
3559         nwuf->size &= 0xffff;
3560 
3561         DbgBreakIf(nwuf->size > LM_NWUF_PATTERN_MASK_SIZE);
3562 
3563         if(nwuf->size == 0)
3564         {
3565             continue;
3566         }
3567 
3568         if (pattern_size)
3569         {
3570             val |= (pattern_size) << ((7 - idx) * 8);
3571         }
3572         else
3573         {
3574             val |= (nwuf->size * 8) << ((7 - idx) * 8);
3575         }
3576 
3578     }
3579     REG_WR(pdev, rpm.rpm_acpi_pattern_len1, val);
3580 
3581     for(offset = 0; offset < LM_NWUF_PATTERN_SIZE; offset++)
3582     {
3583         val = 0;
3584 
3585         for(idx = 0; idx < LM_MAX_NWUF_CNT_5709; idx++)
3586         {
3587             nwuf = &nwuf_list->nwuf_arr[idx];
3588             pattern_size = nwuf_size[idx] >> 16;
3589 
3590             if(nwuf->size == 0 || offset > nwuf->size * 8)
3591             {
3592                 continue;
3593             }
3594 
3595             mask = nwuf->mask[offset/8];
3596             bit = offset % 8;
3597 
3598             if(mask & (1 << bit))
3599             {
3600                 val |= 1 << idx;
3601             }
3602         }
3603 
3604         REG_WR(pdev, rpm.rpm_acpi_data, val);
3605 
3606         /* Perform the write to the byte enable memory.  The actual pattern
3607          * byte enables start from byte address 2; the first two bytes of
3608          * a packet are always 0, inserted by the EMAC to align the IP header
3609          * to a 4-byte boundary. */
3610         REG_WR(
3611             pdev,
3612             rpm.rpm_acpi_byte_enable_ctrl,
3613             RPM_ACPI_BYTE_ENABLE_CTRL_WR | offset);
3614         REG_RD(pdev, rpm.rpm_acpi_byte_enable_ctrl, &val);
3615         DbgBreakIf(val & RPM_ACPI_BYTE_ENABLE_CTRL_WR);
3616     }
3617 
3618     nwuf_cnt = 0;
3619 
3620     for(idx = 0; idx < LM_MAX_NWUF_CNT_5709; idx++)
3621     {
3622         REG_WR(
3623             pdev,
3624             rpm.rpm_acpi_pattern_ctrl,
3625             RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR|idx);
3626         REG_RD(pdev, rpm.rpm_acpi_pattern_ctrl, &val);
3627         DbgBreakIf(val & RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR);
3628 
3629         nwuf = &nwuf_list->nwuf_arr[idx];
3630         if(nwuf->size == 0)
3631         {
3632             continue;
3633         }
3634         pattern_size = nwuf_size[idx] >> 16;
3635 
3636         /* The CRC calculation is done on 64-bit data, so the length of the
3637          * pattern over which the CRC is calculated needs to be padded
3638          * with 0 to 7 bytes to make it 8-byte aligned. */
3639 
3640         if (pattern_size)
3641         {
3642             nwuf_len = pattern_size;
3643         }
3644         else
3645         {
3646             nwuf_len = (nwuf->size * 8);
3647         }
3648         nwuf_len += 2;  /* 2-byte padding. */
3649         nwuf_len = (nwuf_len + 3) & ~3;
3650 
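
        /* Example: a 3-byte mask covers 24 pattern bytes; 24 + 2 = 26,
         * rounded up to 28 bytes, i.e. seven 4-byte writes in the loop
         * below. */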
3651         prev_val = 0;
3652 
3653         for(offset = 0; offset < nwuf_len; offset += 4)
3654         {
3655             val = 0;
3656 
3657             for(bit = 0; bit < 4; bit++)
3658             {
3659                 if (pattern_size)
3660                 {
3661                     if(offset < pattern_size)
3662                     {
3663                         mask = nwuf->mask[offset/8];
3664                     }
3665                     else
3666                     {
3667                         mask = 0;
3668                     }
3669                 }
3670                 else
3671                 {
3672                     if(offset < nwuf->size * 8)
3673                     {
3674                         mask = nwuf->mask[offset/8];
3675                     }
3676                     else
3677                     {
3678                         mask = 0;
3679                     }
3680                 }
3681                 if(mask & (1 << (bit + (offset % 8))))
3682                 {
3683                     val |= nwuf->pattern[offset+bit] << ((3 - bit) * 8);
3684                 }
3685             }
3686 
3687             REG_WR(pdev, rpm.rpm_acpi_data, (prev_val << 16) | (val >> 16));
3688             prev_val = (u16_t) val;
3689 
3690             REG_WR(
3691                 pdev,
3692                 rpm.rpm_acpi_pattern_ctrl,
3693                 RPM_ACPI_PATTERN_CTRL_WR | idx);
3694             REG_RD(pdev, rpm.rpm_acpi_pattern_ctrl, &val);
3695             DbgBreakIf(val & RPM_ACPI_PATTERN_CTRL_WR);
3696         }
3697 
3698         nwuf_cnt++;
3699     }
3700     for(idx = 0; idx < LM_MAX_NWUF_CNT_5709; idx++)
3701     {
3702         nwuf = &nwuf_list->nwuf_arr[idx];
3703         nwuf->size = nwuf_size[idx];
3704     }
3705 
3706     return nwuf_cnt;
3707 } /* init_nwuf_5709 */
3708 
3709 
3710 
3711 /*******************************************************************************
3712  * Description:
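 *    Program the NWUF patterns into the 5706/5708 header buffer.  Returns
 *    the number of patterns programmed.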
3713  *
3714  * Return:
3715  ******************************************************************************/
3716 STATIC u32_t
3717 init_nwuf_5706(
3718     lm_device_t *pdev,
3719     lm_nwuf_list_t *nwuf_list)
3720 {
3721     typedef union _acpi_wol_pat_t
3722     {
3723         #if defined(LITTLE_ENDIAN)
3724         struct _acpi_wol_pat_as_u8_t
3725         {
3726             u8_t pat[7];
3727             u8_t ena;
3728         } as_u8;
3729 
3730         struct _acpi_wol_pat_as_u32_t
3731         {
3732             u32_t low;
3733             u32_t high;
3734         } as_u32;
3735         #elif defined(BIG_ENDIAN)
3736         struct _acpi_wol_pat_as_u8_t
3737         {
3738             u8_t ena;
3739             u8_t pat[7];
3740         } as_u8;
3741 
3742         struct _acpi_wol_pat_as_u32_t
3743         {
3744             u32_t high;
3745             u32_t low;
3746         } as_u32;
3747         #endif
3748     } acpi_wol_pat_t;
3749 
3750     u32_t filler_pattern_idx;
3751     acpi_wol_pat_t wol_pat;
3752     u32_t pattern_cnt;
3753     u8_t val;
3754     u32_t j;
3755     u32_t k;
3756     u8_t idx;
3757     u32_t nwuf_size[LM_MAX_NWUF_CNT];
3758     lm_nwuf_t *nwuf;
3759 
3760     /*
3761      * The 5706/5708 do not appear to have a pattern size field like the 5709.
3762      */
3763     for(idx = 0; idx < LM_MAX_NWUF_CNT; idx++)
3764     {
3765         nwuf = &nwuf_list->nwuf_arr[idx];
3766         nwuf_size[idx] = nwuf->size;
3767         nwuf->size &= 0xffff;
3768     }
3769 
3770     DbgBreakIf(LM_NWUF_PATTERN_SIZE > 128);
3771     DbgBreakIf(LM_MAX_NWUF_CNT > 7);
3772     DbgBreakIf(CHIP_NUM(pdev) != CHIP_NUM_5706 &&
3773                CHIP_NUM(pdev) != CHIP_NUM_5708);
3774 
3775     /* If a pattern is not present, we will fill the pattern buffer
3776      * with the pattern at this index.  The pattern buffer cannot
3777      * have an empty pattern, otherwise we will get a false detection. */
3778     filler_pattern_idx = 0;
3779 
3780     /* Find out the number of patterns. */
3781     pattern_cnt = 0;
3782     for(k = 0; k < LM_MAX_NWUF_CNT; k++)
3783     {
3784         if(nwuf_list->nwuf_arr[k].size)
3785         {
3786             pattern_cnt++;
3787             filler_pattern_idx = k;
3788         }
3789     }
3790 
3791     /* Program the pattern. */
3792     for(j = 0; j < LM_NWUF_PATTERN_SIZE; j++)
3793     {
3794         wol_pat.as_u32.low = 0x0;
3795         wol_pat.as_u32.high = 0x0;
3796 
3797         /* Build the enable bits. */
3798         wol_pat.as_u8.ena = 0;
3799         for(k = 0; k < LM_MAX_NWUF_CNT; k++)
3800         {
3801             if(nwuf_list->nwuf_arr[k].size == 0)
3802             {
3803                 val = nwuf_list->nwuf_arr[filler_pattern_idx].mask[j/8];
3804             }
3805             else if((j/8) >= nwuf_list->nwuf_arr[k].size)
3806             {
3807                 val = 0;
3808             }
3809             else
3810             {
3811                 val = nwuf_list->nwuf_arr[k].mask[j/8];
3812             }
3813 
3814             /* Determine if a byte is enabled for comparison. */
3815             if(val & (1 << (j % 8)))
3816             {
3817                 wol_pat.as_u8.ena |= 1 << k;
3818             }
3819         }
3820 
3821         DbgMessage1(pdev, VERBOSE, "%02x: ", j);
3822 
3823         /* Enter the byte of each pattern that will be used for comparison. */
3824         for(k = 0; k < LM_MAX_NWUF_CNT; k++)
3825         {
3826             /* Check to see if we are at the end of the pattern.  0xff
3827              * will terminate the pattern.  If there is no pattern present
3828              * we cannot terminate with 0xff. */
3829             if(nwuf_list->nwuf_arr[k].size == 0)
3830             {
3831                 val = nwuf_list->nwuf_arr[filler_pattern_idx].pattern[j];
3832                 DbgMessage(pdev, VERBOSE, "xx ");
3833             }
3834             else if((j/8) >= nwuf_list->nwuf_arr[k].size)
3835             {
3836                 val = 0xff;
3837                 DbgMessage(pdev, VERBOSE, "ff ");
3838             }
3839             else
3840             {
3841                 val = nwuf_list->nwuf_arr[k].pattern[j];
3842                 DbgMessage1(pdev, VERBOSE, "%02x ", val);
3843             }
3844 
3845             /* Format of the ACPI_WOL pattern from low address to high on a
3846              * little endian system:
3847              *    pat0_6 pat0_5 pat0_4 pat0_3 pat0_2 pat0_1 pat0_0 ena0
3848              *
3849              * on a big endian system:
3850              *    ena0 pat0_0 pat0_1 pat0_2 pat0_3 pat0_4 pat0_5 pat0_6 */
3851             #if defined(LITTLE_ENDIAN)
3852             wol_pat.as_u8.pat[6-k] = val;
3853             #elif defined(BIG_ENDIAN)
3854             wol_pat.as_u8.pat[k] = val;
3855             #endif
3856         }
3857 
3858         DbgMessage2(pdev, VERBOSE, "   %08x %08x\n",
3859             wol_pat.as_u32.high, wol_pat.as_u32.low);
3860 
3861         /* Swap the even 64-bit word with the odd 64-bit word.  This is
3862          * the way it works.  Don't ask why.  So the values written
3863          * to the header buffer look as follows:
3864          *    0x0000:  ena1   pat1_0 pat1_1 pat1_2
3865          *    0x0004:  pat1_3 pat1_4 pat1_5 pat1_6
3866          *    0x0008:  ena0   pat0_0 pat0_1 pat0_2
3867          *    0x000c:  pat0_3 pat0_4 pat0_5 pat0_6
3868          *    0x0010:  ena3   pat3_0 pat3_1 pat3_2
3869          *    0x0014:  pat3_3 pat3_4 pat3_5 pat3_6
3870          *    0x0018:  ena2   pat2_0 pat2_1 pat2_2
3871          *    0x001c:  pat2_3 pat2_4 pat2_5 pat2_6 */
3872         if(j % 2)
3873         {
3874             REG_WR_IND(
3875                 pdev,
3876                 OFFSETOF(reg_space_t, tas.tas_thbuf[(j-1) * 2]),
3877                 wol_pat.as_u32.high);
3878             REG_WR_IND(
3879                 pdev,
3880                 OFFSETOF(reg_space_t, tas.tas_thbuf[(j-1) * 2 + 1]),
3881                 wol_pat.as_u32.low);
3882         }
3883         else
3884         {
3885             REG_WR_IND(
3886                 pdev,
3887                 OFFSETOF(reg_space_t, tas.tas_thbuf[(j+1) * 2]),
3888                 wol_pat.as_u32.high);
3889             REG_WR_IND(
3890                 pdev,
3891                 OFFSETOF(reg_space_t, tas.tas_thbuf[(j+1) * 2 + 1]),
3892                 wol_pat.as_u32.low);
3893         }
3894     }
3895 
3896     for(idx = 0; idx < LM_MAX_NWUF_CNT; idx++)
3897     {
3898         nwuf = &nwuf_list->nwuf_arr[idx];
3899         nwuf->size = nwuf_size[idx];
3900     }
3901 
3902     return pattern_cnt;
3903 } /* init_nwuf_5706 */
3904 
3905 
3906 
3907 /*******************************************************************************
3908  * Description:
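 *    Program the NWUF patterns using the routine appropriate for the chip.
 *    Returns the number of patterns programmed.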
3909  *
3910  * Return:
3911  ******************************************************************************/
3912 STATIC u32_t
3913 init_nwuf(
3914     lm_device_t *pdev,
3915     lm_nwuf_list_t *nwuf_list)
3916 {
3917     u32_t nwuf_cnt;
3918 
3919     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
3920     {
3921         nwuf_cnt = init_nwuf_5706(pdev, nwuf_list);
3922     }
3923     else
3924     {
3925         nwuf_cnt = init_nwuf_5709(pdev, nwuf_list);
3926     }
3927 
3928     return nwuf_cnt;
3929 } /* init_nwuf */
3930 
3931 
3932 
3933 /*******************************************************************************
3934  * Description:
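 *    Bring the device to the D0 power state and disable magic packet and
 *    interesting packet detection.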
3935  *
3936  * Return:
3937  ******************************************************************************/
3938 STATIC void
3939 set_d0_power_state(
3940     lm_device_t *pdev,
3941     u8_t set_pci_pm)
3942 {
3943     u32_t val;
3944     u32_t idx;
3945 
3946     /* This step should be done by the OS or the caller.  Windows is
3947      * already doing this. */
3948     if(set_pci_pm)
3949     {
3950         /* Set the device to D0 state.  If a device is already in D3 state,
3951          * we will not be able to read the PCICFG_PM_CSR register using the
3952          * PCI memory command, we need to use config access here. */
3953         (void) mm_read_pci(
3954             pdev,
3955             OFFSETOF(reg_space_t, pci_config.pcicfg_pm_csr),
3956             &val);
3957 
3958         /* Set the device to D0 state.  This may be already done by the OS. */
3959         val &= ~PCICFG_PM_CSR_STATE;
3960         val |= PCICFG_PM_CSR_STATE_D0 | PCICFG_PM_CSR_PME_STATUS;
3961 
3962         (void) mm_write_pci(
3963             pdev,
3964             OFFSETOF(reg_space_t, pci_config.pcicfg_pm_csr),
3965             val);
3966     }
3967 
3968     /* With 5706_A1, the chip gets a reset coming out of D3.  Wait
3969      * for the boot code to finish running before we continue.  Without
3970      * this wait, we could run into a lockup or the PHY may not work. */
3971     if(CHIP_ID(pdev) == CHIP_ID_5706_A1)
3972     {
3973         for(idx = 0; idx < 1000; idx++)
3974         {
3975             mm_wait(pdev, 15);
3976         }
3977     }
3978 
3979     /* Clear the ACPI_RCVD and MPKT_RCVD bits and disable magic packet. */
3980     REG_RD(pdev, emac.emac_mode, &val);
3981     val |= EMAC_MODE_MPKT_RCVD | EMAC_MODE_ACPI_RCVD;
3982     val &= ~EMAC_MODE_MPKT;
3983     REG_WR(pdev, emac.emac_mode, val);
3984 
3985     /* Disable interesting packet detection. */
3986     REG_RD(pdev, rpm.rpm_config, &val);
3987     val &= ~RPM_CONFIG_ACPI_ENA;
3988     REG_WR(pdev, rpm.rpm_config, val);
3989 } /* set_d0_power_state */
3990 
3991 
3992 
3993 /*******************************************************************************
3994  * Description:
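 *    Put the device into the D3 power state, optionally arming magic
 *    packet and/or network wake-up frame detection, and switch to vaux
 *    power if the firmware is not running.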
3995  *
3996  * Return:
3997  ******************************************************************************/
3998 STATIC void
3999 set_d3_power_state(
4000     lm_device_t *pdev,
4001     lm_wake_up_mode_t wake_up_mode,
4002     u8_t set_pci_pm)
4003 {
4004     u32_t fw_timed_out;
4005     u32_t reset_reason;
4006     u32_t gpio_pin;
4007     u32_t val;
4008     u32_t cnt;
4009 
4010     /* Set up magic and interesting packet detection. */
4011     if(wake_up_mode & (LM_WAKE_UP_MODE_MAGIC_PACKET | LM_WAKE_UP_MODE_NWUF))
4012     {
4013         /* Enable magic packet detection. */
4014         REG_RD(pdev, emac.emac_mode, &val);
4015         if(wake_up_mode & LM_WAKE_UP_MODE_MAGIC_PACKET)
4016         {
4017             val |= EMAC_MODE_MPKT;
4018         }
4019         else
4020         {
4021             val &= ~EMAC_MODE_MPKT;
4022         }
4023 
4024         /* Enable port mode. */
4025         val &= ~EMAC_MODE_PORT;
4026         if(CHIP_REV(pdev) == CHIP_REV_FPGA || CHIP_REV(pdev) == CHIP_REV_IKOS)
4027         {
4028             /* IKOS and FPGA always run in GMII mode even if the actual
4029              * link speed is 10Mb or 100Mb. */
4030             val |= EMAC_MODE_PORT_GMII;
4031         }
4032         else
4033         {
4034             val |= EMAC_MODE_PORT_MII;
4035         }
4036         val |= EMAC_MODE_MPKT_RCVD | EMAC_MODE_ACPI_RCVD;
4037 
4038         REG_WR(pdev, emac.emac_mode, val);
4039 
4040         /* Set up the receive mask. */
4041         (void) lm_set_rx_mask(
4042             pdev,
4043             RX_FILTER_USER_IDX0,
4044             LM_RX_MASK_ACCEPT_UNICAST |
4045                 LM_RX_MASK_ACCEPT_ALL_MULTICAST |
4046                 LM_RX_MASK_ACCEPT_BROADCAST);
4047 
4048         /* The first four address slots are used for magic packet detection.
4049          * We need to initialize all four address slots. */
4050         for(cnt = 0; cnt < 4; cnt++)
4051         {
4052             (void) lm_set_mac_addr(pdev, cnt, pdev->params.mac_addr);
4053         }
4054 
4055         /* Need to enable EMAC and RPM for WOL. */
4056         REG_WR(
4057             pdev,
4058             misc.misc_enable_set_bits,
4059             MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4060                 MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4061                 MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4062 
4063         /* Enable interesting packet detection.  This must be done after
4064          * the necessary blocks are enabled, otherwise we may wake up on
4065          * a bogus first packet.  This needs to be documented in the PRM. */
4066         REG_RD(pdev, rpm.rpm_config, &val);
4067         if(wake_up_mode & LM_WAKE_UP_MODE_NWUF)
4068         {
4069             REG_WR(pdev, rpm.rpm_config, val & ~RPM_CONFIG_ACPI_ENA);
4070 
4071             /* This also needs to be documented in the PRM: to prevent a false
4072              * detection, we need to disable ACPI_ENA if there is no pattern
4073              * programmed.  There is no way of preventing a false detection
4074              * by initializing the pattern buffer a certain way. */
4075             if(init_nwuf(pdev, &pdev->nwuf_list))
4076             {
4077                 val |= RPM_CONFIG_ACPI_ENA;
4078             }
4079             else
4080             {
4081                 val &= ~RPM_CONFIG_ACPI_ENA;
4082             }
4083         }
4084         else
4085         {
4086             val &= ~RPM_CONFIG_ACPI_ENA;
4087         }
4088         REG_WR(pdev, rpm.rpm_config, val);
4089 
4090         /* Xinan (5709) requires rbuf to be enabled.  Enabling it for
4091          * Teton (5706/5708) does not hurt. */
4092         REG_WR(
4093             pdev,
4094             misc.misc_enable_set_bits,
4095             MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
4096 
4097         reset_reason = LM_REASON_WOL_SUSPEND;
4098     }
4099     else
4100     {
4101         reset_reason = LM_REASON_NO_WOL_SUSPEND;
4102     }
4103 
4104     /* Allow the firmware to make any final changes to the chip before
4105      * we go into D3 mode.  The timeout period is longer because the
4106      * firmware could take more time to download the management firmware,
4107      * which occurs during this stage of the reset. */
4108     fw_timed_out = fw_reset_sync(
4109         pdev,
4110         reset_reason,
4111         DRV_MSG_DATA_WAIT3,
4112         FW_ACK_TIME_OUT_MS*1000 * 3);
4113 
4114     /* If the firmware is not running, we have to switch to vaux power,
4115      * otherwise let the firmware do it. */
4116     if(fw_timed_out)
4117     {
4118         /* Power down the PHY. */
4119         if(pdev->params.enable_remote_phy == FALSE)
4120         {
4121             if(CHIP_REV(pdev) != CHIP_REV_FPGA &&
4122                 CHIP_REV(pdev) != CHIP_REV_IKOS)
4123             {
4124                 (void) lm_mwrite(
4125                     pdev,
4126                     pdev->params.phy_addr,
4127                     0x1c,
4128                     0xa821);
4129             }
4130         }
4131 
4132         /* Minimum core clock for a particular link.
4133          *    10Mb      core_clk = 6.25Mhz
4134          *    100Mb     core_clk = 12Mhz
4135          *    1Gb       core_clk = 100Mhz (use PLL)
4136          *
4137          * The driver is configured to autoneg to 10/100Mb for WOL mode.  So
4138          * the core clock needs to be configured to 12Mhz. */
4139         REG_RD(pdev, misc.misc_clock_control_bits, &val);
4140         val &= ~(MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE |
4141             MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT |
4142             MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_TE);
4143 
4144         /* Select the 12.5m alt clock. */
4145         REG_WR(
4146             pdev,
4147             misc.misc_clock_control_bits,
4148             MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12_TE | val);
4149 
4150         /* Switch to the alt clock. */
4151         REG_WR(
4152             pdev,
4153             misc.misc_clock_control_bits,
4154             MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12_TE |
4155                 MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT |
4156                 val);
4157 
4158         /* Disable core clock to non-wol blocks. */
4159         REG_WR(
4160             pdev,
4161             misc.misc_clock_control_bits,
4162             MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12_TE |
4163                 MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT |
4164                 MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE |
4165                 val);
4166 
4167         gpio_pin = 1 << 2;  /* GPIO 2 */
4168 
4169         /* Switch to vaux power by bringing GPIO2 low. */
4170         REG_RD(pdev, misc.misc_spio, &val);
4171         val &= ~(gpio_pin << 24);           /* use this gpio as output. */
4172         val |= gpio_pin << 16;              /* clear the gpio. */
4173         REG_WR(pdev, misc.misc_spio, val);
4174 
4175         /* This step should be done by the OS or the caller.  Windows is
4176          * already doing this. */
4177         if(set_pci_pm)
4178         {
4179             /* Set the device to D3 state. */
4180             REG_RD_OFFSET(
4181                 pdev,
4182                 OFFSETOF(reg_space_t, pci_config.pcicfg_pm_csr),
4183                 &val);
4184 
4185             val &= ~PCICFG_PM_CSR_STATE;
4186             val |= PCICFG_PM_CSR_STATE_D3_HOT;
4187 
4188             REG_WR_OFFSET(
4189                 pdev,
4190                 OFFSETOF(reg_space_t, pci_config.pcicfg_pm_csr),
4191                 val);
4192         }
4193     }
4194 } /* set_d3_power_state */
4195 
4196 
4197 
4198 /*******************************************************************************
4199  * Description:
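 *    Set the device power state.  wake_up_mode is only meaningful when
 *    entering D3.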
4200  *
4201  * Return:
4202  ******************************************************************************/
4203 void
4204 lm_set_power_state(
4205     lm_device_t *pdev,
4206     lm_power_state_t power_state,
4207     lm_wake_up_mode_t wake_up_mode,     /* Valid when power_state is D3. */
4208     u8_t set_pci_pm)
4209 {
4210     if(power_state == LM_POWER_STATE_D0)
4211     {
4212         set_d0_power_state(pdev, set_pci_pm);
4213     }
4214     else
4215     {
4216         set_d3_power_state(pdev, wake_up_mode, set_pci_pm);
4217     }
4218 } /* lm_set_power_state */
4219 
4220 
4221 
4222 #ifndef EXCLUDE_KQE_SUPPORT
4223 /*******************************************************************************
4224  * Description:
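 *    Copy the given kernel work queue entries onto the KWQ ring and
 *    notify the chip.  Returns the number of entries submitted, or 0 if
 *    the ring does not have enough room.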
4225  *
4226  * Return:
4227  ******************************************************************************/
4228 u32_t
4229 lm_submit_kernel_wqes(
4230     lm_device_t *pdev,
4231     kwqe_t *wqes[],
4232     u32_t num_wqes)
4233 {
4234     kwqe_t *prod_qe;
4235     u16_t prod_idx;
4236     u32_t qe_cnt;
4237 
4238     if(num_wqes > pdev->kq_info.kwqe_left)
4239     {
4240         pdev->kq_info.no_kwq_bd_left++;
4241 
4242         return 0;
4243     }
4244 
4245     pdev->kq_info.kwqe_left -= num_wqes;
4246 
4247     prod_qe = pdev->kq_info.kwq_prod_qe;
4248     prod_idx = pdev->kq_info.kwq_prod_idx;
4249 
4250     qe_cnt = num_wqes;
4251     while(qe_cnt)
4252     {
4253         *prod_qe = *(*wqes);
4254 
4255         if(prod_qe == pdev->kq_info.kwq_last_qe)
4256         {
4257             prod_qe = pdev->kq_info.kwq_virt;
4258         }
4259         else
4260         {
4261             prod_qe++;
4262         }
4263 
4264         wqes++;
4265         prod_idx++;
4266         qe_cnt--;
4267     }
4268 
4269     pdev->kq_info.kwq_prod_qe = prod_qe;
4270     pdev->kq_info.kwq_prod_idx = prod_idx;
4271 
4272     MBQ_WR16(
4273         pdev,
4274         GET_CID(pdev->kq_info.kwq_cid_addr),
4275         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
4276         prod_idx);
4277 
4278     return num_wqes;
4279 } /* lm_submit_kernel_wqes */
4280 #endif /* EXCLUDE_KQE_SUPPORT */
4281 
4282 
4283 
4284 /*******************************************************************************
4285  * Description:
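 *    Collect the pending events (link change, firmware, L2 tx/rx, KWQ,
 *    KCQ) into an lm_interrupt_status_t bit mask.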
4286  *
4287  * Return:
4288  ******************************************************************************/
4289 lm_interrupt_status_t
4290 lm_get_interrupt_status(
4291     lm_device_t *pdev)
4292 {
4293     lm_interrupt_status_t intr_status;
4294     u32_t deasserted_attns;
4295     u32_t asserted_attns;
4296     lm_rx_chain_t *rxq;
4297     lm_tx_chain_t *txq;
4298     u16_t hw_con_idx;
4299     u32_t val;
4300     u32_t idx;
4301 
4302     intr_status = LM_NO_EVENT_ACTIVE;
4303 
4304     /* Determine link change status. */
4305     if(pdev->params.link_chng_mode == LINK_CHNG_MODE_USE_STATUS_REG)
4306     {
4307         REG_RD(pdev, emac.emac_status, &val);
4308         if(pdev->params.phy_int_mode == PHY_INT_MODE_MI_INTERRUPT)
4309         {
4310             if(val & EMAC_STATUS_MI_INT)
4311             {
4312                 intr_status |= LM_PHY_EVENT_ACTIVE;
4313             }
4314         }
4315         else if(val & EMAC_STATUS_LINK_CHANGE)
4316         {
4317             intr_status |= LM_PHY_EVENT_ACTIVE;
4318         }
4319 
4320         GET_ATTN_CHNG_BITS(pdev, &asserted_attns, &deasserted_attns);
4321     }
4322     else
4323     {
4324         GET_ATTN_CHNG_BITS(pdev, &asserted_attns, &deasserted_attns);
4325 
4326         if(asserted_attns & STATUS_ATTN_BITS_LINK_STATE)
4327         {
4328             intr_status |= LM_PHY_EVENT_ACTIVE;
4329         }
4330         else if(deasserted_attns & STATUS_ATTN_BITS_LINK_STATE)
4331         {
4332             intr_status |= LM_PHY_EVENT_ACTIVE;
4333         }
4334     }
4335 
4336     /* Get driver pulse event.  MCP uses the TIMER_ABORT attention to
4337      * signal to the driver to write a driver pulse to the firmware. */
4338     if((asserted_attns & STATUS_ATTN_BITS_TIMER_ABORT) ||
4339         (deasserted_attns & STATUS_ATTN_BITS_TIMER_ABORT))
4340     {
4341         if(pdev->params.enable_remote_phy)
4342         {
4343             REG_RD_IND(
4344                 pdev,
4345                 pdev->hw_info.shmem_base +
4346                     OFFSETOF(shmem_region_t, fw_evt_mb.fw_evt_code_mb),
4347                 &val);
4348 
4349             if(val == 0)
4350             {
4351                 intr_status |= LM_KNOCK_KNOCK_EVENT;
4352             }
4353             else if(val == FW_EVT_CODE_LINK_STATUS_CHANGE_EVENT)
4354             {
4355                 intr_status |= LM_PHY_EVENT_ACTIVE;
4356             }
4357             else
4358             {
4359                 DbgBreakMsg("not a valid fw event.\n");
4360             }
4361         }
4362         else
4363         {
4364             intr_status |= LM_KNOCK_KNOCK_EVENT;
4365         }
4366 
4367         if(asserted_attns & STATUS_ATTN_BITS_TIMER_ABORT)
4368         {
4369             REG_WR(
4370                 pdev,
4371                 pci_config.pcicfg_status_bit_set_cmd,
4372                 asserted_attns & STATUS_ATTN_BITS_TIMER_ABORT);
4373         }
4374         else
4375         {
4376             REG_WR(
4377                 pdev,
4378                 pci_config.pcicfg_status_bit_clear_cmd,
4379                 deasserted_attns & STATUS_ATTN_BITS_TIMER_ABORT);
4380         }
4381     }
4382 
4383     /* get l2 tx events. */
4384     for(idx = 0; idx < pdev->tx_info.num_txq; idx++)
4385     {
4386         txq = &pdev->tx_info.chain[idx];
4387 
4388         hw_con_idx = *txq->hw_con_idx_ptr;
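        /* A consumer index that lands on the last BD of a page is advanced
         * past it; that slot presumably holds the next-page pointer rather
         * than a real BD. */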
4389         if((hw_con_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
4390         {
4391             hw_con_idx++;
4392         }
4393 
4394         if(hw_con_idx != txq->con_idx)
4395         {
4396             intr_status |= LM_TX0_EVENT_ACTIVE << txq->idx;
4397         }
4398     }
4399 
4400     /* get l2 rx events. */
4401     for(idx = 0; idx < pdev->rx_info.num_rxq; idx++)
4402     {
4403         rxq = &pdev->rx_info.chain[idx];
4404 
4405         hw_con_idx = *rxq->hw_con_idx_ptr;
4406         if((hw_con_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
4407         {
4408             hw_con_idx++;
4409         }
4410 
4411         if(hw_con_idx != rxq->con_idx)
4412         {
4413             intr_status |= LM_RX0_EVENT_ACTIVE << rxq->idx;
4414         }
4415     }
4416 
4417     #ifndef EXCLUDE_KQE_SUPPORT
4418     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
4419     {
4420         /* HC install problem:  as a workaround, rx_quick_consumer_index15
4421          * is hijacked for use as cmd_con_idx.  The original cmd_con_idx
4422          * is not used. */
4423         if(pdev->kq_info.kwq_con_idx !=
4424             pdev->vars.status_virt->deflt.status_rx_quick_consumer_index15)
4425         {
4426             intr_status |= LM_KWQ_EVENT_ACTIVE;
4427         }
4428     }
4429     else
4430     {
4431         if(pdev->kq_info.kwq_con_idx !=
4432             pdev->vars.status_virt->deflt.status_cmd_consumer_index)
4433         {
4434             intr_status |= LM_KWQ_EVENT_ACTIVE;
4435         }
4436     }
4437 
4438     if(pdev->kq_info.kcq_con_idx !=
4439         pdev->vars.status_virt->deflt.status_completion_producer_index)
4440     {
4441         intr_status |= LM_KCQ_EVENT_ACTIVE;
4442     }
4443     #endif
4444 
4445     #if INCLUDE_OFLD_SUPPORT
4446     else if(pdev->params.hcopy_desc_cnt)
4447     {
4448         if(pdev->ofld.hcopy_chain.con_idx !=
4449             *(pdev->ofld.hcopy_chain.hw_con_idx_ptr))
4450         {
4451             intr_status |= LM_KCQ_EVENT_ACTIVE;
4452         }
4453     }
4454     #endif
4455 
4456     return intr_status;
4457 } /* lm_get_interrupt_status */
4458 
4459 
4460 
4461 #ifndef EXCLUDE_KQE_SUPPORT
4462 /*******************************************************************************
4463  * Description:
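 *    Reclaim KWQ ring space by advancing the driver's consumer index and
 *    pointer to match the chip's consumer index.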
4464  *
4465  * Return:
4466  ******************************************************************************/
4467 void
4468 lm_ack_completed_wqes(
4469     lm_device_t *pdev)
4470 {
4471     u16_t new_con_idx;
4472     kwqe_t *con_qe;
4473     u16_t num_wqes;
4474     u16_t con_idx;
4475 
4476     /* HC install problem:  as a workaround, rx_quick_consumer_index15
4477      * is hijacked for use as cmd_con_idx.  The original cmd_con_idx
4478      * is not used. */
4479     if(CHIP_NUM(pdev) == CHIP_NUM_5706 || CHIP_NUM(pdev) == CHIP_NUM_5708)
4480     {
4481         new_con_idx =
4482             pdev->vars.status_virt->deflt.status_rx_quick_consumer_index15;
4483     }
4484     else
4485     {
4486         new_con_idx = pdev->vars.status_virt->deflt.status_cmd_consumer_index;
4487     }
4488 
4489     num_wqes = (u16_t) S16_SUB(new_con_idx, pdev->kq_info.kwq_con_idx);
4490     pdev->kq_info.kwqe_left += num_wqes;
4491 
4492     con_idx = new_con_idx;
4493     con_qe = pdev->kq_info.kwq_con_qe + num_wqes;
4494 
4495     /* Check for con_qe wrap around. */
4496     if((u8_t *) con_qe > (u8_t *) pdev->kq_info.kwq_last_qe)
4497     {
4498         con_qe = (kwqe_t *) ((u8_t *) pdev->kq_info.kwq_virt +
4499             ((u8_t *) con_qe - (u8_t *) pdev->kq_info.kwq_last_qe));
4500         con_qe--;
4501     }
4502 
4503     pdev->kq_info.kwq_con_idx = con_idx;
4504     pdev->kq_info.kwq_con_qe = con_qe;
4505 
4506     /* Make sure the con_qe and con_idx are consistent. */
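    /* The 0x7f mask compares the indices modulo 128, assuming each KWQ
     * page holds 128 KWQEs. */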
4507     DbgBreakIf(((((u8_t *) con_qe - (u8_t *) pdev->kq_info.kwq_virt) /
4508         sizeof(kwqe_t)) & 0x7f) != (con_idx & 0x7f));
4509 
4510     #if DBG
4511     /* Make sure all the kwqes are accounted for. */
4512     if(S16_SUB(pdev->kq_info.kwq_prod_idx, con_idx) >= 0)
4513     {
4514         num_wqes = pdev->kq_info.kwqe_left +
4515             (u32_t) S16_SUB(pdev->kq_info.kwq_prod_idx, con_idx);
4516     }
4517     else
4518     {
4519         num_wqes = pdev->kq_info.kwqe_left + 0x10000 - con_idx +
4520             pdev->kq_info.kwq_prod_idx;
4521     }
4522 
4523     DbgBreakIf(num_wqes != (LM_PAGE_SIZE/sizeof(kwqe_t)) *
4524         pdev->params.kwq_page_cnt - 1);
4525     #endif
4526 } /* lm_ack_completed_wqes */
4527 
4528 
4529 
4530 /*******************************************************************************
4531  * Description:
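 *    Gather pointers to the KCQEs that have been DMA'd to the host,
 *    stopping short of any completion that is only partially present.
 *    Returns the number of pointers stored in cqe_ptr.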
4532  *
4533  * Return:
4534  ******************************************************************************/
4535 u32_t
4536 lm_get_kernel_cqes(
4537     lm_device_t *pdev,
4538     kcqe_t *cqe_ptr[],
4539     u32_t ptr_cnt)
4540 {
4541     kcqe_t *con_qe;
4542     u16_t prod_idx;
4543     u32_t num_cqes;
4544     u16_t con_idx;
4545 
4546     DbgMessage(pdev, VERBOSEint, "### lm_get_kernel_cqes\n");
4547 
4548     con_idx = pdev->kq_info.kcq_con_idx;
4549     con_qe = pdev->kq_info.kcq_con_qe;
4550 
4551     DbgBreakIf(((((u8_t *) con_qe - (u8_t *) pdev->kq_info.kcq_virt) /
4552         sizeof(kcqe_t)) & 0x7f) != (con_idx & 0x7f));
4553 
4554     num_cqes = 0;
4555     prod_idx = pdev->vars.status_virt->deflt.status_completion_producer_index;
4556 
4557     while(con_idx != prod_idx && num_cqes != ptr_cnt)
4558     {
4559         *cqe_ptr = con_qe;
4560         cqe_ptr++;
4561         num_cqes++;
4562         con_idx++;
4563 
4564         if(con_qe == pdev->kq_info.kcq_last_qe)
4565         {
4566             con_qe = pdev->kq_info.kcq_virt;
4567         }
4568         else
4569         {
4570             con_qe++;
4571         }
4572 
4573         prod_idx =
4574             pdev->vars.status_virt->deflt.status_completion_producer_index;
4575     }
4576 
4577     /* Make sure the last entry in the array does not have the 'next'
4578      * bit set.  We want to ensure the array contains all the cqes
4579      * for a completion.
4580      *
4581      * This piece of code also takes care of the case where a completion
4582      * spans multiple kcqes and not all the kcqes have been dma'd to
4583      * the host.  For example, if a completion consists of A, B, C, and D
4584      * kcqes, the status block may tell us only A and B have been dma'd.  In
4585      * this case, we don't want to return kcqes A and B in the array. */
4586     cqe_ptr--;
4587     while(num_cqes && ((*cqe_ptr)->kcqe_flags & KCQE_FLAGS_NEXT))
4588     {
4589         num_cqes--;
4590         cqe_ptr--;
4591     }
4592 
4593     DbgBreakIf(num_cqes == 0);
4594 
4595     return num_cqes;
4596 } /* lm_get_kernel_cqes */
4597 
4598 
4599 
4600 /*******************************************************************************
4601  * Description:
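 *    Acknowledge num_cqes KCQEs.  When kcq_history_size is set, the last
 *    kcq_history_size entries are left unacknowledged for debugging.
 *    Returns non-zero if more completions are pending.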
4602  *
4603  * Return:
4604  ******************************************************************************/
4605 u8_t
4606 lm_ack_kernel_cqes(
4607     lm_device_t *pdev,
4608     u32_t num_cqes)
4609 {
4610     kcqe_t *con_qe;
4611     u16_t prod_idx;
4612     u16_t con_idx;
4613 
4614     DbgMessage(pdev, VERBOSEint, "### lm_ack_kernel_cqes\n");
4615 
4616     con_idx = pdev->kq_info.kcq_con_idx;
4617 
4618     if(num_cqes)
4619     {
4620         /* Advance the consumer index and the con_qe pointer */
4621         con_idx += (u16_t) num_cqes;
4622         con_qe = pdev->kq_info.kcq_con_qe + num_cqes;
4623 
4624         /* Check for con_qe wrap around. */
4625         if((u8_t *) con_qe > (u8_t *) pdev->kq_info.kcq_last_qe)
4626         {
4627             con_qe = (kcqe_t *) ((u8_t *) pdev->kq_info.kcq_virt +
4628                 ((u8_t *) con_qe - (u8_t *) pdev->kq_info.kcq_last_qe));
4629             con_qe--;
4630         }
4631 
4632         pdev->kq_info.kcq_con_idx = con_idx;
4633         pdev->kq_info.kcq_con_qe = con_qe;
4634 
4635         /* Don't acknowledge the last 'kcq_history_size' entries so the
4636          * chip will not overwrite them with new entries.  We are doing
4637          * this to have a history of the kcq entries for debugging. */
4638         if(pdev->params.kcq_history_size)
4639         {
4640             /* The con_idx should always be ahead of history_kcq_con_idx. */
4641             DbgBreakIf(S16_SUB(con_idx, pdev->kq_info.history_kcq_con_idx) < 0);
4642 
4643             /* Number of entries between con_idx and history_kcq_con_idx. */
4644             num_cqes = (u32_t) S16_SUB(
4645                 con_idx,
4646                 pdev->kq_info.history_kcq_con_idx);
4647 
4648             /* Don't advance the consumer index if the number of history
4649              * entries is less than 'kcq_history_size'. */
4650             if(num_cqes >= pdev->params.kcq_history_size)
4651             {
4652                 /* Make sure we will have at most kcq_history_size entries. */
4653                 num_cqes -= pdev->params.kcq_history_size;
4654 
4655                 DbgBreakIf(num_cqes > pdev->params.kcq_history_size);
4656 
4657                 /* Advance the consumer index and the con_qe pointer */
4658                 pdev->kq_info.history_kcq_con_idx += (u16_t) num_cqes;
4659                 con_qe = pdev->kq_info.history_kcq_con_qe + num_cqes;
4660 
4661                 /* Check for con_qe wrap around. */
4662                 if((u8_t *) con_qe > (u8_t *) pdev->kq_info.kcq_last_qe)
4663                 {
4664                     con_qe = (kcqe_t *) ((u8_t *) pdev->kq_info.kcq_virt +
4665                         ((u8_t *) con_qe -
4666                          (u8_t *) pdev->kq_info.kcq_last_qe));
4667                     con_qe--;
4668                 }
4669                 pdev->kq_info.history_kcq_con_qe = con_qe;
4670 
4671                 MBQ_WR16(
4672                     pdev,
4673                     GET_CID(pdev->kq_info.kcq_cid_addr),
4674                     OFFSETOF(krnlq_context_t, krnlq_host_qidx),
4675                     pdev->kq_info.history_kcq_con_idx);
4676             }
4677         }
4678         else
4679         {
4680             MBQ_WR16(
4681                 pdev,
4682                 GET_CID(pdev->kq_info.kcq_cid_addr),
4683                 OFFSETOF(krnlq_context_t, krnlq_host_qidx),
4684                 con_idx);
4685         }
4686     }
4687 
4688     prod_idx = pdev->vars.status_virt->deflt.status_completion_producer_index;
4689 
4690     DbgBreakIf(S16_SUB(prod_idx, con_idx) < 0);
4691 
4692     return con_idx != prod_idx;
4693 } /* lm_ack_kernel_cqes */
4694 #endif /* EXCLUDE_KQE_SUPPORT */
4695 
4696 
4697 
4698 #ifndef EXCLUDE_RSS_SUPPORT
4699 #if RSS_LOOKUP_TABLE_WA
4700 /*******************************************************************************
4701  * Description:
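 *    Pack key bytes s through e (inclusive) into a 64-bit value, most
 *    significant byte first.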
4702  *
4703  * Return:
4704  ******************************************************************************/
4705 u64_t
4706 rss_f64(
4707     u8_t* key,
4708     u8_t s,
4709     u8_t e
4710     )
4711 {
4712     u64_t f;
4713 
4714     for( f=0; s<=e; ++s )
4715     {
4716         f = (f << 8);
4717         f |= key[s];
4718     }
4719 
4720     return f;
4721 }
4722 
4723 
4724 
4725 /*******************************************************************************
4726  * Description:
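 *    Per-byte step of the Toeplitz hash used by RSS: for each set bit of
 *    'byte', XOR the top 32 bits of the sliding key window into the
 *    running result rst.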
4727  *
4728  * Return:
4729  ******************************************************************************/
4730 u32_t
4731 rss_hash_byte(
4732     u8_t* key,
4733     u8_t  byte,
4734     u8_t  s,
4735     u8_t  e,
4736     u32_t rst
4737     )
4738 {
4739     u8_t i;
4740     u64_t key_msb;
4741 
4742     key_msb = rss_f64(key, s,e);
4743 
4744     for( i=0x80; i!=0; i>>=1 )
4745     {
4746         if( i & byte )
4747         {
4748             u32_t k;
4749 
4750             k = (u32_t)(key_msb >> 32);
4751             rst ^= k;
4752         }
4753         key_msb = (key_msb << 1);
4754     }
4755 
4756     return rst;
4757 }
4758 
4759 
4760 
4761 /*******************************************************************************
4762  * Description:
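 *    Build a 256-entry table of the hash contributions of one input byte
 *    position, using key bytes s through e as the key window.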
4763  *
4764  * Return:
4765  ******************************************************************************/
4766 void
4767 rss_gen_one_table(
4768     u8_t* key,
4769     u8_t  s,
4770     u8_t  e,
4771     u32_t* gtbl
4772     )
4773 {
4774     u32_t i;
4775 
4776     for( i = 0; i < 256; ++i )
4777     {
4778         gtbl[i] = rss_hash_byte( key, (u8_t)i, s, e, 0 );
4779     }
4780 }
4781 
4782 
4783 
4784 /*******************************************************************************
4785  * Description:
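 *    Build the twelve 256-entry lookup tables (one per hashed input byte)
 *    used by the RSS lookup table workaround.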
4786  *
4787  * Return:
4788  ******************************************************************************/
4789 void
4790 rss_gen_tables(
4791     u8_t* key,
4792     u32_t* tables
4793     )
4794 {
4795     u8_t t;
4796 
4797     for( t = 0; t < 12; ++t )
4798     {
4799         rss_gen_one_table( key, t, (u8_t)(t+7), tables );
4800         tables += 256;
4801     }
4802 }
4803 #endif
4804 
4805 
4806 #ifndef LM_NON_LEGACY_MODE_SUPPORT
4807 /*******************************************************************************
4808  * Description:
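 *    Program the RSS hash key and indirection table and submit the KWQEs
 *    that update and enable RSS on the chip.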
4809  *
4810  * Return:
4811  ******************************************************************************/
4812 lm_status_t
4813 lm_enable_rss(
4814     lm_device_t *pdev,
4815     lm_rss_hash_t hash_type,
4816     u8_t *indirection_table,
4817     u32_t table_size,
4818     u8_t *hash_key,
4819     u32_t key_size)
4820 {
4821     l2_kwqe_rss_table_update_t *rss_update;
4822     u8_t rss_key[RSS_HASH_KEY_SIZE];
4823     lm_address_t rss_table_phy;
4824     u8_t *rss_table_virt;
4825     kwqe_t *prod_qe;
4826     u16_t prod_idx;
4827     u32_t idx;
4828     u32_t val;
4829 
4830     if(pdev->kq_info.kwqe_left < 2)
4831     {
4832         pdev->kq_info.no_kwq_bd_left++;
4833         return LM_STATUS_RESOURCE;
4834     }
4835 
4836     pdev->kq_info.kwqe_left -= 2;
4837 
4838     DbgBreakIf(key_size > RSS_HASH_KEY_SIZE);
4839 
4840     /* Initialize the rss key array. */
4841     if(key_size > RSS_HASH_KEY_SIZE)
4842     {
4843         key_size = RSS_HASH_KEY_SIZE;
4844     }
4845 
4846     for(idx = 0; idx < key_size; idx++)
4847     {
4848         rss_key[idx] = hash_key[idx];
4849     }
4850 
4851     for(idx = key_size; idx < RSS_HASH_KEY_SIZE; idx++)
4852     {
4853         rss_key[idx] = 0;
4854     }
4855 
4856     DbgBreakIf(table_size > RSS_INDIRECTION_TABLE_SIZE);
4857 
4858     if(table_size > RSS_INDIRECTION_TABLE_SIZE)
4859     {
4860         table_size = RSS_INDIRECTION_TABLE_SIZE;
4861     }
4862 
4863     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
4864     {
4865         REG_RD(pdev, rlup.rlup_rss_config, &val);
4866         val &= ~RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI;
4867         val &= ~RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI;
4868         REG_WR(pdev, rlup.rlup_rss_config, val);
4869 
4870         val = (rss_key[0] << 24) |
4871               (rss_key[1] << 16) |
4872               (rss_key[2] << 8) |
4873                rss_key[3];
4874         REG_WR(pdev, rlup.rlup_rss_key1, val);
4875 
4876         val = (rss_key[4] << 24) |
4877               (rss_key[5] << 16) |
4878               (rss_key[6] << 8) |
4879                rss_key[7];
4880         REG_WR(pdev, rlup.rlup_rss_key2, val);
4881 
4882         val = (rss_key[8] << 24) |
4883               (rss_key[9] << 16) |
4884               (rss_key[10] << 8) |
4885                rss_key[11];
4886         REG_WR(pdev, rlup.rlup_rss_key3, val);
4887 
4888         val = (rss_key[12] << 24) |
4889               (rss_key[13] << 16) |
4890               (rss_key[14] << 8) |
4891                rss_key[15];
4892         REG_WR(pdev, rlup.rlup_rss_key4, val);
4893 
4894         val = (rss_key[16] << 24) |
4895               (rss_key[17] << 16) |
4896               (rss_key[18] << 8) |
4897                rss_key[19];
4898         REG_WR(pdev, rlup.rlup_ipv6_rss_key5, val);
4899 
4900         val = (rss_key[20] << 24) |
4901               (rss_key[21] << 16) |
4902               (rss_key[22] << 8) |
4903                rss_key[23];
4904         REG_WR(pdev, rlup.rlup_ipv6_rss_key6, val);
4905 
4906         val = (rss_key[24] << 24) |
4907               (rss_key[25] << 16) |
4908               (rss_key[26] << 8) |
4909                rss_key[27];
4910         REG_WR(pdev, rlup.rlup_ipv6_rss_key7, val);
4911 
4912         val = (rss_key[28] << 24) |
4913               (rss_key[29] << 16) |
4914               (rss_key[30] << 8) |
4915                rss_key[31];
4916         REG_WR(pdev, rlup.rlup_ipv6_rss_key8, val);
4917 
4918         val = (rss_key[32] << 24) |
4919               (rss_key[33] << 16) |
4920               (rss_key[34] << 8) |
4921                rss_key[35];
4922         REG_WR(pdev, rlup.rlup_ipv6_rss_key9, val);
4923 
4924         val = (rss_key[36] << 24) |
4925               (rss_key[37] << 16) |
4926               (rss_key[38] << 8) |
4927                rss_key[39];
4928         REG_WR(pdev, rlup.rlup_ipv6_rss_key10, val);
4929     }
4930 
4931     rss_table_virt = pdev->rx_info.rss_ind_table_virt;
4932     rss_table_phy = pdev->rx_info.rss_ind_table_phy;
4933 
4934     for(idx = 0; idx < table_size; idx++)
4935     {
4936         rss_table_virt[idx] = indirection_table[idx];
4937     }
4938 
4939     prod_qe = pdev->kq_info.kwq_prod_qe;
4940     prod_idx = pdev->kq_info.kwq_prod_idx;
4941 
4942     /* Initialize the RSS update KWQE. */
4943     rss_update = (l2_kwqe_rss_table_update_t *) prod_qe;
4944 
4945     rss_update->rss_flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
4946     rss_update->rss_opcode = L2_KWQE_OPCODE_VALUE_UPDATE_RSS;
4947 
4948     rss_update->rss_table_size = (u16_t) table_size;
4949     rss_update->rss_table_haddr_lo = rss_table_phy.as_u32.low;
4950     rss_update->rss_table_haddr_hi = rss_table_phy.as_u32.high;
4951     rss_update->rss_host_opaque = 0;
4952     rss_update->rss_hash_type = hash_type;
4953 
4954     #if RSS_LOOKUP_TABLE_WA
4955     rss_table_virt += RSS_INDIRECTION_TABLE_SIZE;
4956     LM_INC64(&rss_table_phy, RSS_INDIRECTION_TABLE_SIZE);
4957 
4958     rss_update->rss_lookup_table_lo = rss_table_phy.as_u32.low;
4959     rss_update->rss_lookup_table_hi = rss_table_phy.as_u32.high;
4960 
4961     rss_gen_tables(rss_key, (u32_t *) rss_table_virt);
4962     #endif
4963 
4964     /* Advance to the next KWQE. */
4965     if(prod_qe == pdev->kq_info.kwq_last_qe)
4966     {
4967         prod_qe = pdev->kq_info.kwq_virt;
4968     }
4969     else
4970     {
4971         prod_qe++;
4972     }
4973     prod_idx++;
4974 
4975     /* Initialize the RSS enable KWQE. */
4976     rss_update = (l2_kwqe_rss_table_update_t *) prod_qe;
4977 
4978     rss_update->rss_flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
4979     rss_update->rss_opcode = L2_KWQE_OPCODE_VALUE_ENABLE_RSS;
4980     rss_update->rss_host_opaque = 0;
4981     rss_update->rss_hash_type = hash_type;
4982 
4983     /* Advance to the next KWQE. */
4984     if(prod_qe == pdev->kq_info.kwq_last_qe)
4985     {
4986         prod_qe = pdev->kq_info.kwq_virt;
4987     }
4988     else
4989     {
4990         prod_qe++;
4991     }
4992     prod_idx++;
4993 
4994     pdev->kq_info.kwq_prod_qe = prod_qe;
4995     pdev->kq_info.kwq_prod_idx = prod_idx;
4996 
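     /* Ring the doorbell: write the new host producer index into the kernel
      * work queue context so the chip starts processing the KWQEs. */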
4997     MBQ_WR16(
4998         pdev,
4999         GET_CID(pdev->kq_info.kwq_cid_addr),
5000         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5001         prod_idx);
5002 
5003     return LM_STATUS_SUCCESS;
5004 } /* lm_enable_rss */
5005 #else /* LM_NON_LEGACY_MODE_SUPPORT */
5006 /*******************************************************************************
5007  * Description: Program the RSS hash key and indirection table, then post
5008  *              UPDATE_RSS and ENABLE_RSS KWQEs to the chip.
5009  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE if no KWQE is available.
5010  ******************************************************************************/
5011 lm_status_t
5012 lm_enable_rss(
5013     lm_device_t *pdev,
5014     lm_rss_hash_t hash_type,
5015     PROCESSOR_NUMBER *indirection_table,
5016     u32_t table_size,
5017     u8_t *hash_key,
5018     u32_t key_size,
5019     u8_t *cpu_tbl,
5020     u8_t *rss_qidx_tbl)
5021 {
5022     l2_kwqe_rss_table_update_t *rss_update;
5023     u8_t rss_key[RSS_HASH_KEY_SIZE];
5024     lm_address_t rss_table_phy;
5025     u8_t *rss_table_virt;
5026     kwqe_t *prod_qe;
5027     u16_t prod_idx;
5028     u32_t idx;
5029     u32_t val;
5030 
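     /*
      * Two KWQEs are posted back to back below (an RSS table update followed
      * by an RSS enable), so both must be available before starting.
      */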
5031     if(pdev->kq_info.kwqe_left < 2)
5032     {
5033         pdev->kq_info.no_kwq_bd_left++;
5034         return LM_STATUS_RESOURCE;
5035     }
5036 
5037     pdev->kq_info.kwqe_left -= 2;
5038 
5039     DbgBreakIf(key_size > RSS_HASH_KEY_SIZE);
5040 
5041     /* Initialize the rss key array. */
5042     if(key_size > RSS_HASH_KEY_SIZE)
5043     {
5044         key_size = RSS_HASH_KEY_SIZE;
5045     }
5046 
5047     for(idx = 0; idx < key_size; idx++)
5048     {
5049         rss_key[idx] = hash_key[idx];
5050     }
5051 
5052     for(idx = key_size; idx < RSS_HASH_KEY_SIZE; idx++)
5053     {
5054         rss_key[idx] = 0;
5055     }
5056 
5057     DbgBreakIf(table_size > RSS_INDIRECTION_TABLE_SIZE);
5058 
5059     if(table_size > RSS_INDIRECTION_TABLE_SIZE)
5060     {
5061         table_size = RSS_INDIRECTION_TABLE_SIZE;
5062     }
5063 
5064     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
5065     {
5066         REG_RD(pdev, rlup.rlup_rss_config, &val);
5067         val &= ~RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI;
5068         val &= ~RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI;
5069         REG_WR(pdev, rlup.rlup_rss_config, val);
5070 
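         /*
          * Pack the hash key big-endian, four bytes per register, into the
          * ten RLUP key registers; judging by the register names, keys 1-4
          * serve IPv4 hashing while ipv6_rss_key5-10 extend the key for IPv6.
          */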
5071         val = (rss_key[0] << 24) |
5072               (rss_key[1] << 16) |
5073               (rss_key[2] << 8) |
5074                rss_key[3];
5075         REG_WR(pdev, rlup.rlup_rss_key1, val);
5076 
5077         val = (rss_key[4] << 24) |
5078               (rss_key[5] << 16) |
5079               (rss_key[6] << 8) |
5080                rss_key[7];
5081         REG_WR(pdev, rlup.rlup_rss_key2, val);
5082 
5083         val = (rss_key[8] << 24) |
5084               (rss_key[9] << 16) |
5085               (rss_key[10] << 8) |
5086                rss_key[11];
5087         REG_WR(pdev, rlup.rlup_rss_key3, val);
5088 
5089         val = (rss_key[12] << 24) |
5090               (rss_key[13] << 16) |
5091               (rss_key[14] << 8) |
5092                rss_key[15];
5093         REG_WR(pdev, rlup.rlup_rss_key4, val);
5094 
5095         val = (rss_key[16] << 24) |
5096               (rss_key[17] << 16) |
5097               (rss_key[18] << 8) |
5098                rss_key[19];
5099         REG_WR(pdev, rlup.rlup_ipv6_rss_key5, val);
5100 
5101         val = (rss_key[20] << 24) |
5102               (rss_key[21] << 16) |
5103               (rss_key[22] << 8) |
5104                rss_key[23];
5105         REG_WR(pdev, rlup.rlup_ipv6_rss_key6, val);
5106 
5107         val = (rss_key[24] << 24) |
5108               (rss_key[25] << 16) |
5109               (rss_key[26] << 8) |
5110                rss_key[27];
5111         REG_WR(pdev, rlup.rlup_ipv6_rss_key7, val);
5112 
5113         val = (rss_key[28] << 24) |
5114               (rss_key[29] << 16) |
5115               (rss_key[30] << 8) |
5116                rss_key[31];
5117         REG_WR(pdev, rlup.rlup_ipv6_rss_key8, val);
5118 
5119         val = (rss_key[32] << 24) |
5120               (rss_key[33] << 16) |
5121               (rss_key[34] << 8) |
5122                rss_key[35];
5123         REG_WR(pdev, rlup.rlup_ipv6_rss_key9, val);
5124 
5125         val = (rss_key[36] << 24) |
5126               (rss_key[37] << 16) |
5127               (rss_key[38] << 8) |
5128                rss_key[39];
5129         REG_WR(pdev, rlup.rlup_ipv6_rss_key10, val);
5130     }
5131 
5132     rss_table_virt = pdev->rx_info.rss_ind_table_virt;
5133     rss_table_phy = pdev->rx_info.rss_ind_table_phy;
5134 
5135     pdev->rx_info.rss_tbl_size = table_size;
5136     if(!cpu_tbl) /* does the indirection table already hold queue indices? */
5137     {
5138         for(idx = 0; idx < table_size; idx++)
5139             rss_table_virt[idx] = indirection_table[idx].Number;
5140     }
5141     else
5142     {
5143         /* Map each CPU number in the indirection table to a queue index
5144          * using the CPU table passed down from the um, then rebuild the
5145          * table with queue indices. */
5146         u8_t *rss_cpu_tbl = &cpu_tbl[1];
5147 
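         /* cpu_tbl[0] holds the number of CPU entries that follow it; when
          * MSI/MSI-X is used, rss_qidx_tbl appears to use the same one-based
          * layout to translate a CPU number into a queue index. */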
5148         for(idx = 0; idx < table_size; idx++)
5149         {
5150             for(val = 0; val < cpu_tbl[0]; val++)
5151             {
5152                 if(indirection_table[idx].Number == rss_cpu_tbl[val])
5153                 {
5154                     if(pdev->vars.interrupt_mode == IRQ_MODE_MSIX_BASED ||
5155                        pdev->vars.interrupt_mode == IRQ_MODE_MSI_BASED)
5156                     {
5157                         rss_table_virt[idx] = rss_qidx_tbl[rss_cpu_tbl[val] + 1];
5158                     }
5159                     else
5160                     {
5161                         rss_table_virt[idx] = (u8_t)val;
5162                     }
5163                     break;
5164                 }
5165             }
5166         }
5167     }
5168 
5169     prod_qe = pdev->kq_info.kwq_prod_qe;
5170     prod_idx = pdev->kq_info.kwq_prod_idx;
5171 
5172     /* Initialize the RSS update KWQE. */
5173     rss_update = (l2_kwqe_rss_table_update_t *) prod_qe;
5174 
5175     rss_update->rss_flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5176     rss_update->rss_opcode = L2_KWQE_OPCODE_VALUE_UPDATE_RSS;
5177 
5178     rss_update->rss_table_size = (u16_t) table_size;
5179     rss_update->rss_table_haddr_lo = rss_table_phy.as_u32.low;
5180     rss_update->rss_table_haddr_hi = rss_table_phy.as_u32.high;
5181     rss_update->rss_host_opaque = 0;
5182     rss_update->rss_hash_type = hash_type;
5183 
5184     #if RSS_LOOKUP_TABLE_WA
5185     rss_table_virt += RSS_INDIRECTION_TABLE_SIZE;
5186     LM_INC64(&rss_table_phy, RSS_INDIRECTION_TABLE_SIZE);
5187 
5188     rss_update->rss_lookup_table_lo = rss_table_phy.as_u32.low;
5189     rss_update->rss_lookup_table_hi = rss_table_phy.as_u32.high;
5190 
5191     rss_gen_tables(rss_key, (u32_t *) rss_table_virt);
5192     #endif
5193 
5194     /* Advance to the next KWQE. */
5195     if(prod_qe == pdev->kq_info.kwq_last_qe)
5196     {
5197         prod_qe = pdev->kq_info.kwq_virt;
5198     }
5199     else
5200     {
5201         prod_qe++;
5202     }
5203     prod_idx++;
5204 
5205     /* Initialize the RSS enable KWQE. */
5206     rss_update = (l2_kwqe_rss_table_update_t *) prod_qe;
5207 
5208     rss_update->rss_flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5209     rss_update->rss_opcode = L2_KWQE_OPCODE_VALUE_ENABLE_RSS;
5210     rss_update->rss_host_opaque = 0;
5211     rss_update->rss_hash_type = hash_type;
5212 
5213     /* Advance to the next KWQE. */
5214     if(prod_qe == pdev->kq_info.kwq_last_qe)
5215     {
5216         prod_qe = pdev->kq_info.kwq_virt;
5217     }
5218     else
5219     {
5220         prod_qe++;
5221     }
5222     prod_idx++;
5223 
5224     pdev->kq_info.kwq_prod_qe = prod_qe;
5225     pdev->kq_info.kwq_prod_idx = prod_idx;
5226 
5227     MBQ_WR16(
5228         pdev,
5229         GET_CID(pdev->kq_info.kwq_cid_addr),
5230         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5231         prod_idx);
5232 
5233     return LM_STATUS_SUCCESS;
5234 } /* lm_enable_rss */
5235 #endif /* LM_NON_LEGACY_MODE_SUPPORT */
5236 
5237 
5238 /*******************************************************************************
5239  * Description: Post a DISABLE_RSS KWQE to the kernel work queue to turn
5240  *              RSS off.
5241  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE if no KWQE is available.
5242  ******************************************************************************/
5243 lm_status_t
5244 lm_disable_rss(
5245     lm_device_t *pdev)
5246 {
5247     l2_kwqe_rss_table_update_t *rss_update;
5248     kwqe_t *prod_qe;
5249     u16_t prod_idx;
5250     u32_t val;
5251 
5252     if(pdev->kq_info.kwqe_left < 1)
5253     {
5254         pdev->kq_info.no_kwq_bd_left++;
5255         return LM_STATUS_RESOURCE;
5256     }
5257 
5258     pdev->kq_info.kwqe_left -= 1;
5259 
5260     if(CHIP_NUM(pdev) == CHIP_NUM_5709)
5261     {
5262         REG_RD(pdev, rlup.rlup_rss_config, &val);
5263         val &= ~RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI;
5264         val &= ~RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI;
5265         REG_WR(pdev, rlup.rlup_rss_config, val);
5266     }
5267 
5268     prod_qe = pdev->kq_info.kwq_prod_qe;
5269     prod_idx = pdev->kq_info.kwq_prod_idx;
5270 
5271     /* Initialize the RSS enable KWQE. */
5272     rss_update = (l2_kwqe_rss_table_update_t *) prod_qe;
5273 
5274     rss_update->rss_flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5275     rss_update->rss_opcode = L2_KWQE_OPCODE_VALUE_DISABLE_RSS;
5276 
5277     /* Advance to the next KWQE. */
5278     if(prod_qe == pdev->kq_info.kwq_last_qe)
5279     {
5280         prod_qe = pdev->kq_info.kwq_virt;
5281     }
5282     else
5283     {
5284         prod_qe++;
5285     }
5286     prod_idx++;
5287 
5288     pdev->kq_info.kwq_prod_qe = prod_qe;
5289     pdev->kq_info.kwq_prod_idx = prod_idx;
5290 
5291     MBQ_WR16(
5292         pdev,
5293         GET_CID(pdev->kq_info.kwq_cid_addr),
5294         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5295         prod_idx);
5296 
5297     return LM_STATUS_SUCCESS;
5298 } /* lm_disable_rss */
5299 #endif /* EXCLUDE_RSS_SUPPORT */
5300 
5301 /*******************************************************************************
5302  * Description: On the 5709, mask PCIe non-fatal error reporting when the
5303  *              disable_pcie_nfr parameter is set.
5304  * Return: None.
5305  ******************************************************************************/
5306 void lm_set_pcie_nfe_report(lm_device_t *pdev)
5307 {
5308     if(CHIP_NUM(pdev) == CHIP_NUM_5709 &&
5309        pdev->params.disable_pcie_nfr)
5310     {
5311         u16_t pci_devctl;
5312         REG_RD(pdev,pci_config.pcicfg_device_control,&pci_devctl);
5313         pci_devctl &= ~PCICFG_DEVICE_CONTROL_NON_FATAL_REP_ENA;
5314         REG_WR(pdev,pci_config.pcicfg_device_control,pci_devctl);
5315     }
5316 }
5317 
5318 /*******************************************************************************
5319  * Description: Set every coalescing trip count to 1 and every tick value
5320  *              to 0 so completions are signaled without delay.
5321  * Return: None.
5322  ******************************************************************************/
5323 void lm_clear_coalescing_ticks(lm_device_t *pdev)
5324 {
5325     pdev->params.tx_quick_cons_trip = 1;
5326     pdev->params.tx_quick_cons_trip_int = 1;
5327     pdev->params.rx_quick_cons_trip = 1;
5328     pdev->params.rx_quick_cons_trip_int = 1;
5329     pdev->params.comp_prod_trip = 1;
5330     pdev->params.comp_prod_trip_int = 1;
5331 
5332     pdev->params.tx_ticks = 0;
5333     pdev->params.tx_ticks_int = 0;
5334     pdev->params.com_ticks = 0;
5335     pdev->params.com_ticks_int = 0;
5336     pdev->params.cmd_ticks = 0;
5337     pdev->params.cmd_ticks_int = 0;
5338     pdev->params.rx_ticks = 0;
5339     pdev->params.rx_ticks_int = 0;
5340     pdev->params.stats_ticks = 0;
5341 
5342     /* Xinan per-processor HC configuration. */
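    /*
     * Each psb_*_trip value likely packs two 16-bit trip counts (interrupt
     * and no-interrupt cases), so 0x10001 mirrors the trip count of 1
     * programmed above for both.
     */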
5343     pdev->params.psb_tx_cons_trip = 0x10001;
5344     pdev->params.psb_rx_cons_trip = 0x10001;
5345     pdev->params.psb_comp_prod_trip = 0x10001;
5346 
5347     pdev->params.psb_tx_ticks = 0;
5348     pdev->params.psb_rx_ticks = 0;
5349     pdev->params.psb_com_ticks = 0;
5350     pdev->params.psb_cmd_ticks = 0;
5351     pdev->params.psb_period_ticks = 0;
5352 }
5353 
5354 u8_t lm_is_mmio_ok(lm_device_t *pdev)
5355 {
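    /* A config-space read of all ones means the device is no longer
     * responding (e.g. it dropped off the bus), so MMIO is not usable. */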
5356     u32_t val;
5357     REG_RD(pdev, pci_config.pcicfg_vendor_id, &val);
5358     if (0xffffffff == val)
5359     {
5360         return FALSE;
5361     }
5362     else
5363     {
5364         return TRUE;
5365     }
5366 }
5367 
5368 #if defined(LM_NON_LEGACY_MODE_SUPPORT)
5369 /*******************************************************************************
5370  * Description: Record the VMQ lookahead size and program it into the
5371  *              queue group's L2 BD chain context.
5372  * Return: None.
5373  ******************************************************************************/
5374 void
5375 lm_create_q_group(
5376     lm_device_t *pdev,
5377     u32_t q_group_id,
5378     u32_t lookahead_sz
5379     )
5380 {
5381     u32_t val;
5382     lm_rx_chain_t *rxq;
5383 
5384     rxq = &pdev->rx_info.chain[q_group_id];
5385     rxq->vmq_lookahead_size = lookahead_sz;
5386 
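    /* The lookahead size occupies the upper 16 bits of the context word. */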
5387     val = lookahead_sz << 16;
5388     CTX_WR(
5389         pdev,
5390         rxq->cid_addr,
5391         WORD_ALIGNED_OFFSETOF(l2_bd_chain_context_t,
5392                               l2ctx_vmq_lookahead_sz),
5393         val);
5394 }
5395 
5396 /*******************************************************************************
5397  * Description: Post VM_FREE_RX_QUEUE KWQEs to tear down a queue group; the
5398  *              default group may free several queue pairs at once.
5399  * Return: LM_STATUS_SUCCESS, or LM_STATUS_RESOURCE if not enough KWQEs.
5400  ******************************************************************************/
5401 lm_status_t
5402 lm_destroy_q_group(
5403     lm_device_t *pdev,
5404     u32_t q_group_id,
5405     u32_t num_queues
5406     )
5407 {
5408     u32_t num_kwqes_needed;
5409     kwqe_t *prod_qe;
5410     u16_t prod_idx;
5411     l2_kwqe_vm_free_rx_queue_t *kwqe_free_rxq;
5412 
5413     num_kwqes_needed = num_queues;
5414 
5415     if(pdev->kq_info.kwqe_left < num_kwqes_needed)
5416     {
5417         DbgMessage(pdev, WARN, "No more KWQEs left.\n");
5418 
5419         pdev->kq_info.no_kwq_bd_left++;
5420 
5421         return LM_STATUS_RESOURCE;
5422     }
5423 
5424     prod_qe = pdev->kq_info.kwq_prod_qe;
5425     prod_idx = pdev->kq_info.kwq_prod_idx;
5426 
5427     kwqe_free_rxq = (l2_kwqe_vm_free_rx_queue_t *) prod_qe;
5428 
5429     if(q_group_id <= RX_CHAIN_IDX3)
5430     {
5431         if(q_group_id == RX_CHAIN_IDX0)
5432         {
5433             u8_t idx;
5434             /* the default queue may have more than one queue pair */
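            /*
             * The qid of each extra pair appears to continue past the four
             * normal RX chains, hence idx + 3 below.
             */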
5435             for(idx = 0; idx < num_queues; idx++)
5436             {
5437                 kwqe_free_rxq->flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5438                 kwqe_free_rxq->queue_type = L2_NORMAL_QUEUE;
5439 
5440                 if(idx == 0)
5441                     kwqe_free_rxq->qid = (u8_t)q_group_id;
5442                 else
5443                 {
5444                     kwqe_free_rxq->qid = idx + 3;
5445                 }
5446 
5447                 kwqe_free_rxq->opcode = L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE;
5448 
5449                 /* Advance to the next KWQE. */
5450                 if(prod_qe == pdev->kq_info.kwq_last_qe)
5451                 {
5452                     prod_qe = pdev->kq_info.kwq_virt;
5453                 }
5454                 else
5455                 {
5456                     prod_qe++;
5457                 }
5458                 prod_idx++;
5459 
5460                 pdev->kq_info.kwqe_left -= 1;
5461                 kwqe_free_rxq = (l2_kwqe_vm_free_rx_queue_t *) prod_qe;
5462             }
5463             pdev->kq_info.kwq_prod_qe = prod_qe;
5464             pdev->kq_info.kwq_prod_idx = prod_idx;
5465 
5466             MBQ_WR16(
5467                 pdev,
5468                 GET_CID(pdev->kq_info.kwq_cid_addr),
5469                 OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5470                 prod_idx);
5471 
5472             return LM_STATUS_SUCCESS;
5473         }
5474         else
5475         {
5476             kwqe_free_rxq->queue_type = L2_NORMAL_QUEUE;
5477             kwqe_free_rxq->qid = (u8_t)q_group_id;
5478             pdev->kq_info.kwqe_left -= 1;
5479 #if INCLUDE_OFLD_SUPPORT
5480             if(q_group_id == RX_CHAIN_IDX2 &&
5481                !s_list_is_empty(&pdev->rx_info.chain[RX_CHAIN_IDX1].active_descq))
5482             {
5483                 kwqe_free_rxq->flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5484                 kwqe_free_rxq->opcode = L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE;
5485 
5486                 /* Advance to the next KWQE. */
5487                 if(prod_qe == pdev->kq_info.kwq_last_qe)
5488                 {
5489                     prod_qe = pdev->kq_info.kwq_virt;
5490                 }
5491                 else
5492                 {
5493                     prod_qe++;
5494                 }
5495                 prod_idx++;
5496 
5497                 /* flush the catchup RX queue too */
5498                 kwqe_free_rxq = (l2_kwqe_vm_free_rx_queue_t *) prod_qe;
5499                 kwqe_free_rxq->queue_type = L2_NORMAL_QUEUE;
5500                 kwqe_free_rxq->qid = (u8_t)RX_CHAIN_IDX1;
5501                 pdev->kq_info.kwqe_left -= 1;
5502             }
5503 #endif
5504         }
5505     }
5506     else
5507     {
5508         kwqe_free_rxq->queue_type = L2_VM_QUEUE;
5509         kwqe_free_rxq->qid = (u8_t)q_group_id;
5510         pdev->kq_info.kwqe_left -= 1;
5511     }
5512     kwqe_free_rxq->flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5513     kwqe_free_rxq->opcode = L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE;
5514 
5515     /* Advance to the next KWQE. */
5516     if(prod_qe == pdev->kq_info.kwq_last_qe)
5517     {
5518         prod_qe = pdev->kq_info.kwq_virt;
5519     }
5520     else
5521     {
5522         prod_qe++;
5523     }
5524     prod_idx++;
5525 
5526     pdev->kq_info.kwq_prod_qe = prod_qe;
5527     pdev->kq_info.kwq_prod_idx = prod_idx;
5528 
5529     MBQ_WR16(
5530         pdev,
5531         GET_CID(pdev->kq_info.kwq_cid_addr),
5532         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5533         prod_idx);
5534 
5535     return LM_STATUS_SUCCESS;
5536 }
5537 
5538 /*******************************************************************************
5539  * Description: Mark the default-queue hardware filter context valid or
5540  *              invalid in RXP scratch memory.
5541  * Return: None.
5542  ******************************************************************************/
5543 VOID
5544 lm_update_defq_filter_ctx(
5545     lm_device_t *pdev,
5546     u8_t valid
5547     )
5548 {
5549     u32_t ctx_offset = pdev->vars.hw_filter_ctx_offset;
5550     u32_t val = 0;
5551 
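    /* The filter type occupies the upper 16 bits of the context word;
     * writing zero marks the default-queue filter invalid. */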
5552     if(valid)
5553         val |= L2_VM_FILTER_MAC << 16;
5554 
5555     REG_WR_IND(
5556         pdev,
5557         OFFSETOF(reg_space_t, rxp.rxp_scratch[0])+ctx_offset,
5558         val);
5559 }
5560 
5561 /*******************************************************************************
5562  * Description: Post a KWQE to set or remove a MAC/VLAN receive filter for
5563  *              a queue group; passing no MAC and no VLAN removes the filter.
5564  * Return: LM_STATUS_PENDING, or LM_STATUS_RESOURCE if no KWQE is available.
5565  ******************************************************************************/
5566 lm_status_t
5567 lm_chng_q_group_filter(
5568     lm_device_t *pdev,
5569     u32_t q_group_id,
5570     u8_t  *dest_mac,
5571     u16_t *vlan_ptr,
5572     u32_t filter_id
5573     )
5574 {
5575     kwqe_t *prod_qe;
5576     u16_t prod_idx;
5577 
5578     if(pdev->kq_info.kwqe_left < 1)
5579     {
5580         DbgMessage(pdev, WARN, "No more KWQEs left.\n");
5581 
5582         pdev->kq_info.no_kwq_bd_left++;
5583 
5584         return LM_STATUS_RESOURCE;
5585     }
5586 
5587     prod_qe = pdev->kq_info.kwq_prod_qe;
5588     prod_idx = pdev->kq_info.kwq_prod_idx;
5589 
5590     pdev->kq_info.kwqe_left -= 1;
5591     if(dest_mac == NULL && vlan_ptr == NULL)
5592     {
5593         /* clear filter operation */
5594         l2_kwqe_vm_remove_rx_filter_t * kwqe_remove_rx_filter =
5595             (l2_kwqe_vm_remove_rx_filter_t *) prod_qe;
5596         kwqe_remove_rx_filter->flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5597         kwqe_remove_rx_filter->qid = (u8_t)q_group_id;
5598         kwqe_remove_rx_filter->filter_id = (u8_t)filter_id;
5599         kwqe_remove_rx_filter->opcode = L2_KWQE_OPCODE_VALUE_VM_REMOVE_RX_FILTER;
5600     }
5601     else
5602     {
5603         /* set filter operation */
5604         l2_kwqe_vm_set_rx_filter_t * kwqe_set_rx_filter =
5605             (l2_kwqe_vm_set_rx_filter_t *) prod_qe;
5606 
5607         kwqe_set_rx_filter->flags = L2_KWQE_FLAGS_LAYER_MASK_L2;
5608         kwqe_set_rx_filter->qid = (u8_t)q_group_id;
5609         kwqe_set_rx_filter->filter_id = (u8_t)filter_id;
5610         if(vlan_ptr)
5611         {
5612             kwqe_set_rx_filter->vlan = *vlan_ptr;
5613             kwqe_set_rx_filter->filter_type = L2_VM_FILTER_MAC_VLAN;
5614         }
5615         else
5616         {
5617             kwqe_set_rx_filter->filter_type = L2_VM_FILTER_MAC;
5618         }
5619         kwqe_set_rx_filter->opcode = L2_KWQE_OPCODE_VALUE_VM_SET_RX_FILTER;
5620     }
5621 
5622     /* Advance to the next KWQE. */
5623     if(prod_qe == pdev->kq_info.kwq_last_qe)
5624     {
5625         prod_qe = pdev->kq_info.kwq_virt;
5626     }
5627     else
5628     {
5629         prod_qe++;
5630     }
5631     prod_idx++;
5632 
5633     pdev->kq_info.kwq_prod_qe = prod_qe;
5634     pdev->kq_info.kwq_prod_idx = prod_idx;
5635 
5636     MBQ_WR16(
5637         pdev,
5638         GET_CID(pdev->kq_info.kwq_cid_addr),
5639         OFFSETOF(krnlq_context_t, krnlq_host_qidx),
5640         prod_idx);
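    /* Completion is asynchronous: the chip answers with a SET/REMOVE
     * RX_FILTER KCQE that lm_service_l2_kcqes() reports back through
     * mm_comp_l2_filter_chng_req(). */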
5641     return LM_STATUS_PENDING;
5642 }
5643 
5644 /*******************************************************************************
5645  * Description: Service L2 kernel completion queue entries, dispatching
5646  *              queue-flush and filter-change completions to the um layer.
5647  * Return: The number of KCQEs consumed.
5648  ******************************************************************************/
5649 u32_t
5650 lm_service_l2_kcqes(
5651     struct _lm_device_t *pdev,
5652     kcqe_t *cqe_ptr[],
5653     u32_t num_cqes)
5654 {
5655     u32_t cqe_serviced_cnt;
5656     u32_t cqe_cnt;
5657     u8_t success;
5658     kcqe_t *kcqe;
5659     lm_status_t lm_status;
5660 
5661     cqe_serviced_cnt = 0;
5662     while(num_cqes)
5663     {
5664         /* Determine the number of cqes for a completion.  Some
5665          * completions span several cqes. */
5666         cqe_cnt = 0;
5667         while(cqe_ptr[cqe_cnt]->kcqe_flags & KCQE_FLAGS_NEXT)
5668         {
5669             cqe_cnt++;
5670         }
5671         cqe_cnt++;
5672 
5673         DbgBreakIf(cqe_cnt > num_cqes);
5674 
5675         kcqe = *cqe_ptr;
5676 
5677         DbgBreakIf((kcqe->kcqe_flags & KCQE_FLAGS_LAYER_MASK) !=
5678                     KCQE_FLAGS_LAYER_MASK_L2);
5679 
5680         switch(kcqe->kcqe_opcode)
5681         {
5682             case L2_KCQE_OPCODE_VALUE_VM_FREE_RX_QUEUE:
5683                 /* initiate rx buffer abort */
5684                 {
5685                     l2_kcqe_vm_free_rx_queue_t *kcqe_free_rxq;
5686 
5687                     kcqe_free_rxq = (l2_kcqe_vm_free_rx_queue_t *)kcqe;
5688                     mm_q_grp_abort_rx_request(
5689                         pdev,
5690                         kcqe_free_rxq->qid);
5691                 }
5692                 break;
5693 
5694             case L2_KCQE_OPCODE_VALUE_VM_SET_RX_FILTER:
5695             case L2_KCQE_OPCODE_VALUE_VM_REMOVE_RX_FILTER:
5696                 {
5697                     l2_kcqe_vm_set_rx_filter_t *kcqe_filter;
5698 
5699                     kcqe_filter = (l2_kcqe_vm_set_rx_filter_t *)kcqe;
5700                     if(kcqe_filter->status == SC_SUCCESS)
5701                     {
5702                         lm_status = LM_STATUS_SUCCESS;
5703                     }
5704                     else
5705                     {
5706                         lm_status = LM_STATUS_FAILURE;
5707                     }
5708                     mm_comp_l2_filter_chng_req(
5709                         pdev,
5710                         lm_status,
5711                         kcqe_filter->qid);
5712                 }
5713                 break;
5714 
5715             case L2_KCQE_OPCODE_VALUE_VM_ALLOC_RX_QUEUE:
5716             case L2_KCQE_OPCODE_VALUE_RX_PACKET:
5717             case L2_KCQE_OPCODE_VALUE_ENABLE_RSS:
5718             case L2_KCQE_OPCODE_VALUE_DISABLE_RSS:
5719             case L2_KCQE_OPCODE_VALUE_UPDATE_RSS:
5720             case L2_KCQE_OPCODE_VALUE_FLUSH_BD_CHAIN:
5721                 /* no need to do anything in the driver */
5722                 break;
5723 
5724             default:
5725                 DbgBreakMsg("invalid l2 kcqe.\n");
5726                 break;
5727         }
5728 
5729         cqe_ptr += cqe_cnt;
5730         num_cqes -= cqe_cnt;
5731         cqe_serviced_cnt += cqe_cnt;
5732     }
5733 
5734     return cqe_serviced_cnt;
5735 }
5736 #endif /* LM_NON_LEGACY_MODE_SUPPORT */
5737