xref: /illumos-gate/usr/src/uts/common/io/bnxe/bnxe_tx.c (revision 55fea89dcaa64928bed4327112404dcb3e07b79f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2014 QLogic Corporation
24  * The contents of this file are subject to the terms of the
25  * QLogic End User License (the "License").
26  * You may not use this file except in compliance with the License.
27  *
28  * You can obtain a copy of the License at
29  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
30  * QLogic_End_User_Software_License.txt
31  * See the License for the specific language governing permissions
32  * and limitations under the License.
33  */
34 
35 /*
36  * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
37  */
38 
39 #include "bnxe.h"
40 
41 ddi_dma_attr_t bnxeTxDmaAttrib =
42 {
43     DMA_ATTR_V0,                /* dma_attr_version */
44     0,                          /* dma_attr_addr_lo */
45     0xffffffffffffffff,         /* dma_attr_addr_hi */
46     0xffffffffffffffff,         /* dma_attr_count_max */
47     BNXE_DMA_ALIGNMENT,         /* dma_attr_align */
48     0xffffffff,                 /* dma_attr_burstsizes */
49     1,                          /* dma_attr_minxfer */
50     0xffffffffffffffff,         /* dma_attr_maxxfer */
51     0xffffffffffffffff,         /* dma_attr_seg */
52     BNXE_MAX_DMA_SGLLEN,        /* dma_attr_sgllen */
53     1,                          /* dma_attr_granular */
54     0,                          /* dma_attr_flags */
55 };
56 
57 ddi_dma_attr_t bnxeTxCbDmaAttrib =
58 {
59     DMA_ATTR_V0,                /* dma_attr_version */
60     0,                          /* dma_attr_addr_lo */
61     0xffffffffffffffff,         /* dma_attr_addr_hi */
62     0xffffffffffffffff,         /* dma_attr_count_max */
63     BNXE_DMA_ALIGNMENT,         /* dma_attr_align */
64     0xffffffff,                 /* dma_attr_burstsizes */
65     1,                          /* dma_attr_minxfer */
66     0xffffffffffffffff,         /* dma_attr_maxxfer */
67     0xffffffffffffffff,         /* dma_attr_seg */
68     1,                          /* dma_attr_sgllen */
69     1,                          /* dma_attr_granular */
70     0,                          /* dma_attr_flags */
71 };
72 
73 
74 static um_txpacket_t * BnxeTxPktAlloc(um_device_t * pUM, size_t size);
75 
76 
BnxeTxPktUnmap(um_txpacket_t * pTxPkt)77 static inline void BnxeTxPktUnmap(um_txpacket_t * pTxPkt)
78 {
79     int i;
80 
81     for (i = 0; i < pTxPkt->num_handles; i++)
82     {
83         ddi_dma_unbind_handle(pTxPkt->dmaHandles[i]);
84     }
85 
86     pTxPkt->num_handles = 0;
87 }
88 
89 
BnxeTxPktsFree(um_txpacket_t * pTxPkt)90 static void BnxeTxPktsFree(um_txpacket_t * pTxPkt)
91 {
92     int i;
93 
94     if (pTxPkt->num_handles > 0)
95     {
96         BnxeTxPktUnmap(pTxPkt);
97     }
98 
99     if (pTxPkt->pMblk != NULL)
100     {
101         freemsg(pTxPkt->pMblk);
102     }
103 
104     for (i = 0; i < BNXE_MAX_DMA_HANDLES_PER_PKT; i++)
105     {
106         ddi_dma_free_handle(&pTxPkt->dmaHandles[i]);
107     }
108 
109     pTxPkt->pMblk         = NULL;
110     pTxPkt->num_handles   = 0;
111     pTxPkt->frag_list.cnt = 0;
112 
113     ddi_dma_unbind_handle(pTxPkt->cbDmaHandle);
114     ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
115     ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
116     kmem_free(pTxPkt, sizeof(um_txpacket_t));
117 }
118 
119 
BnxeTxPktsFreeList(s_list_t * pPktList)120 static void BnxeTxPktsFreeList(s_list_t * pPktList)
121 {
122     um_txpacket_t * pTxPkt;
123 
124     while (!s_list_is_empty(pPktList))
125     {
126         pTxPkt = (um_txpacket_t *)s_list_pop_head(pPktList);
127         BnxeTxPktsFree(pTxPkt);
128     }
129 }
130 
131 
132 /*
133  * Free the mblk and all frag mappings used by each packet in the list
134  * and then put the entire list on the free queue for immediate use.
135  */
BnxeTxPktsReclaim(um_device_t * pUM,int idx,s_list_t * pPktList)136 void BnxeTxPktsReclaim(um_device_t * pUM,
137                        int           idx,
138                        s_list_t *    pPktList)
139 {
140     um_txpacket_t * pTxPkt;
141 
142     if (s_list_entry_cnt(pPktList) == 0)
143     {
144         return;
145     }
146 
147     for (pTxPkt = (um_txpacket_t *)s_list_peek_head(pPktList);
148          pTxPkt;
149          pTxPkt = (um_txpacket_t *)s_list_next_entry(&pTxPkt->lm_pkt.link))
150     {
151         if (pTxPkt->num_handles > 0)
152         {
153             BnxeTxPktUnmap(pTxPkt);
154         }
155 
156         if (pTxPkt->pMblk != NULL)
157         {
158             freemsg(pTxPkt->pMblk);
159             pTxPkt->pMblk = NULL;
160         }
161     }
162 
163     BNXE_LOCK_ENTER_FREETX(pUM, idx);
164     s_list_add_tail(&pUM->txq[idx].freeTxDescQ, pPktList);
165     BNXE_LOCK_EXIT_FREETX(pUM, idx);
166 }
167 
168 
169 /* Must be called with TX lock held!!! */
BnxeTxSendWaitingPkt(um_device_t * pUM,int idx)170 static int BnxeTxSendWaitingPkt(um_device_t * pUM,
171                                 int           idx)
172 {
173     TxQueue *       pTxQ = &pUM->txq[idx];
174     lm_device_t *   pLM = &pUM->lm_dev;
175     lm_tx_chain_t * pLmTxChain;
176     um_txpacket_t * pTxPkt;
177     int rc;
178 
179     pLmTxChain = &pLM->tx_info.chain[idx];
180 
181     while (s_list_entry_cnt(&pTxQ->waitTxDescQ))
182     {
183         pTxPkt = (um_txpacket_t *)s_list_peek_head(&pTxQ->waitTxDescQ);
184 
185         if (pTxPkt->frag_list.cnt + 2 > pLmTxChain->bd_chain.bd_left)
186         {
187             return BNXE_TX_DEFERPKT;
188         }
189 
190         pTxPkt = (um_txpacket_t *)s_list_pop_head(&pTxQ->waitTxDescQ);
191 
192         rc = lm_send_packet(pLM, idx, &pTxPkt->lm_pkt, &pTxPkt->frag_list);
193 
194         if (pUM->fmCapabilities &&
195             BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
196         {
197             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
198         }
199 
200         if (rc != LM_STATUS_SUCCESS)
201         {
202             /*
203              * Send failed (probably not enough BDs available)...
204              * Put the packet back at the head of the wait queue.
205              */
206             pTxQ->txFailed++;
207             s_list_push_head(&pTxQ->waitTxDescQ, &pTxPkt->lm_pkt.link);
208             return BNXE_TX_DEFERPKT;
209         }
210     }
211 
212     return BNXE_TX_GOODXMIT;
213 }
214 
215 
BnxeTxRingProcess(um_device_t * pUM,int idx)216 void BnxeTxRingProcess(um_device_t * pUM,
217                        int           idx)
218 {
219     TxQueue *       pTxQ = &pUM->txq[idx];
220     lm_device_t *   pLM = &pUM->lm_dev;
221     lm_tx_chain_t * pLmTxChain;
222     s_list_t        tmpList;
223     u32_t           pktsTxed;
224     int rc;
225 
226     s_list_clear(&tmpList);
227 
228     BNXE_LOCK_ENTER_TX(pUM, idx);
229 
230     pktsTxed = lm_get_packets_sent(&pUM->lm_dev, idx, &tmpList);
231 
232     if (pUM->fmCapabilities &&
233         BnxeCheckAccHandle(pUM->lm_dev.vars.reg_handle[BAR_0]) != DDI_FM_OK)
234     {
235         ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
236     }
237 
238     if ((pktsTxed + s_list_entry_cnt(&pTxQ->sentTxQ)) >=
239         pUM->devParams.maxTxFree)
240     {
241         s_list_add_tail(&tmpList, &pTxQ->sentTxQ);
242         s_list_clear(&pTxQ->sentTxQ);
243     }
244     else
245     {
246         s_list_add_tail(&pTxQ->sentTxQ, &tmpList);
247         s_list_clear(&tmpList);
248     }
249 
250     BNXE_LOCK_EXIT_TX(pUM, idx);
251 
252     if (s_list_entry_cnt(&tmpList))
253     {
254         BnxeTxPktsReclaim(pUM, idx, &tmpList);
255     }
256 
257     if (pTxQ->noTxCredits == 0)
258     {
259         /* no need to notify the stack */
260         return;
261     }
262 
263     pLmTxChain = &pUM->lm_dev.tx_info.chain[idx];
264 
265     if (pTxQ->noTxCredits & BNXE_TX_RESOURCES_NO_CREDIT)
266     {
267         BNXE_LOCK_ENTER_TX(pUM, idx);
268         rc = BnxeTxSendWaitingPkt(pUM, idx);
269         BNXE_LOCK_EXIT_TX(pUM, idx);
270 
271         if ((rc == BNXE_TX_GOODXMIT) &&
272             (pLmTxChain->bd_chain.bd_left >= BNXE_MAX_DMA_FRAGS_PER_PKT))
273         {
274             atomic_and_32(&pTxQ->noTxCredits, ~BNXE_TX_RESOURCES_NO_CREDIT);
275         }
276     }
277 
278     if ((pTxQ->noTxCredits & BNXE_TX_RESOURCES_NO_DESC) &&
279         (s_list_entry_cnt(&pTxQ->freeTxDescQ) > pTxQ->thresh_pdwm))
280     {
281         atomic_and_32(&pTxQ->noTxCredits, ~BNXE_TX_RESOURCES_NO_DESC);
282     }
283 
284     if (pTxQ->noTxCredits == 0)
285     {
286         if (idx == FCOE_CID(pLM))
287         {
288             BnxeLogInfo(pUM, "FCoE tx credit ok, no upcall!");
289         }
290         else
291         {
292             /* notify the stack that tx resources are now available */
293 #if defined(BNXE_RINGS) && (defined(__S11) || defined(__S12))
294             mac_tx_ring_update(pUM->pMac, pTxQ->ringHandle);
295 #else
296             mac_tx_update(pUM->pMac);
297 #endif
298         }
299     }
300 }
301 
302 
BnxeTxPktMapFrag(um_device_t * pUM,um_txpacket_t * pTxPkt,mblk_t * pMblk)303 static inline int BnxeTxPktMapFrag(um_device_t *   pUM,
304                                    um_txpacket_t * pTxPkt,
305                                    mblk_t *        pMblk)
306 {
307     ddi_dma_handle_t dmaHandle;
308     ddi_dma_cookie_t cookie;
309     lm_frag_t *      pFrag;
310     boolean_t        partial;
311     u32_t            bindLen;
312     u32_t            count;
313     int rc, i;
314 
315     if (pTxPkt->num_handles == BNXE_MAX_DMA_HANDLES_PER_PKT)
316     {
317         return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
318     }
319 
320     if (pTxPkt->frag_list.cnt >= BNXE_MAX_DMA_FRAGS_PER_PKT)
321     {
322         return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
323     }
324 
325     dmaHandle = pTxPkt->dmaHandles[pTxPkt->num_handles];
326 
327     if ((rc = ddi_dma_addr_bind_handle(dmaHandle,
328                                        NULL,
329                                        (caddr_t)pMblk->b_rptr,
330                                        (pMblk->b_wptr - pMblk->b_rptr),
331                                        (DDI_DMA_WRITE | DDI_DMA_STREAMING),
332                                        DDI_DMA_DONTWAIT,
333                                        NULL,
334                                        &cookie,
335                                        &count)) != DDI_DMA_MAPPED)
336     {
337         BnxeLogWarn(pUM, "Failed to bind DMA address for tx packet (%d)", rc);
338         return BNXE_TX_RESOURCES_NO_OS_DMA_RES;
339     }
340 
341     /*
342      * ddi_dma_addr_bind_handle() correctly returns an error if the physical
343      * fragment count exceeds the maximum fragment count specified in the
344      * ddi_dma_attrib structure for the current pMblk.  However, a packet can
345      * span multiple mblk's.  The purpose of the check below is to make sure we
346      * do not overflow our fragment count limit based on what has already been
347      * mapped from this packet.
348      */
349     partial = ((pTxPkt->frag_list.cnt + count) >
350                (pMblk->b_cont ? BNXE_MAX_DMA_FRAGS_PER_PKT - 1
351                               : BNXE_MAX_DMA_FRAGS_PER_PKT));
352     if (partial)
353     {
354         /*
355          * Going to try a partial dma so (re)set count to the remaining number
356          * of dma fragments that are available leaving one fragment at the end.
357          */
358         count = (BNXE_MAX_DMA_FRAGS_PER_PKT - 1 - pTxPkt->frag_list.cnt);
359         if (count == 0)
360         {
361             /*
362              * No more dma fragments are available.  This fragment was not
363              * mapped and will be copied into the copy buffer along with the
364              * rest of the packet data.
365              */
366             ddi_dma_unbind_handle(dmaHandle);
367             return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
368         }
369     }
370 
371     pFrag = &pTxPkt->frag_list.frag_arr[pTxPkt->frag_list.cnt];
372     pTxPkt->frag_list.cnt += count;
373 
374     /* map "count" dma fragments */
375 
376     bindLen = 0;
377     for (i = 0; i < (count - 1); i++)
378     {
379         pFrag->addr.as_u64 = cookie.dmac_laddress;
380         bindLen += pFrag->size = cookie.dmac_size;
381 
382         pFrag++;
383 
384         ddi_dma_nextcookie(dmaHandle, &cookie);
385     }
386 
387     pFrag->addr.as_u64 = cookie.dmac_laddress;
388     bindLen += pFrag->size = cookie.dmac_size;
389 
390     pTxPkt->num_handles++;
391 
392     if (partial)
393     {
394         /*
395          * Move the mblk's read pointer past the data that was bound to a DMA
396          * fragment.  Any remaining data will get copied into the copy buffer.
397          */
398         pMblk->b_rptr += bindLen;
399         return BNXE_TX_RESOURCES_TOO_MANY_FRAGS;
400     }
401 
402     return 0;
403 }
404 
405 
BnxeTxPktCopy(um_device_t * pUM,TxQueue * pTxQ,um_txpacket_t * pTxPkt)406 static int BnxeTxPktCopy(um_device_t *   pUM,
407                          TxQueue *       pTxQ,
408                          um_txpacket_t * pTxPkt)
409 {
410     lm_frag_t * pCopyFrag = NULL;
411     size_t      msgSize;
412     size_t      copySize = 0;
413     size_t      pktLen = 0;
414     boolean_t   tryMap = B_TRUE;
415     mblk_t *    pMblk;
416     caddr_t     pTmp;
417     int rc;
418 
419     /* Walk the chain to get the total pkt length... */
420     for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
421     {
422         pktLen += MBLKL(pMblk);
423     }
424 
425     /*
426      * If the packet length is under the tx copy threshold then copy
427      * the all data into the copy buffer.
428      */
429     if (pktLen < pUM->devParams.txCopyThreshold)
430     {
431         ASSERT(pktLen <= pTxPkt->cbLength);
432 
433         pTmp = pTxPkt->pCbBuf;
434 
435         for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
436         {
437             if ((msgSize = MBLKL(pMblk)) == 0)
438             {
439                 continue;
440             }
441 
442             bcopy(pMblk->b_rptr, pTmp, msgSize);
443             pTmp += msgSize;
444         }
445 
446         pCopyFrag              = &pTxPkt->frag_list.frag_arr[0];
447         pCopyFrag->addr.as_u64 = pTxPkt->cbPhysAddr.as_u64;
448         pCopyFrag->size        = pktLen;
449         pTxPkt->frag_list.cnt++;
450 
451         copySize = pktLen;
452         pTxQ->txCopied++;
453 
454         /* Done! */
455         goto _BnxeTxPktCopy_DMA_SYNC_COPY_BUFFER;
456     }
457 
458     /* Try to DMA map all the blocks... */
459 
460     for (pMblk = pTxPkt->pMblk; pMblk; pMblk = pMblk->b_cont)
461     {
462         if ((msgSize = MBLKL(pMblk)) == 0)
463         {
464             continue;
465         }
466 
467         if (tryMap)
468         {
469             if (BnxeTxPktMapFrag(pUM, pTxPkt, pMblk) == 0)
470             {
471                 /*
472                  * The fragment was successfully mapped now move on to the
473                  * next one.  Here we set pCopyFrag to NULL which represents
474                  * a break of continuous data in the copy buffer.  If the
475                  * packet header was copied the first fragment points to the
476                  * beginning of the copy buffer.  Since this block was mapped
477                  * any future blocks that have to be copied must be handled by
478                  * a new fragment even though the fragment is pointed to the
479                  * copied data in the copy buffer.
480                  */
481                 pCopyFrag = NULL;
482                 continue;
483             }
484             else
485             {
486                 /*
487                  * The frament was not mapped or was partially mapped.  In
488                  * either case we will no longer try to map the remaining
489                  * blocks.  All remaining packet data is copied.
490                  */
491                 tryMap = B_FALSE;
492                 msgSize = MBLKL(pMblk); /* new msgSize with partial binding */
493             }
494         }
495 
496 #if 0
497         if ((copySize + msgSize) > pTxPkt->cbLength)
498         {
499             /* remaining packet is too large (length more than copy buffer) */
500             BnxeTxPktUnmap(pTxPkt);
501             return -1;
502         }
503 #else
504         ASSERT((copySize + msgSize) <= pTxPkt->cbLength);
505 #endif
506 
507         bcopy(pMblk->b_rptr, (pTxPkt->pCbBuf + copySize), msgSize);
508 
509         /*
510          * If pCopyFrag is already specified then simply update the copy size.
511          * If not then set pCopyFrag to the next available fragment.
512          */
513         if (pCopyFrag)
514         {
515             pCopyFrag->size += msgSize;
516         }
517         else
518         {
519             ASSERT((pTxPkt->frag_list.cnt + 1) <= BNXE_MAX_DMA_FRAGS_PER_PKT);
520             pCopyFrag              = &pTxPkt->frag_list.frag_arr[pTxPkt->frag_list.cnt++];
521             pCopyFrag->size        = msgSize;
522             pCopyFrag->addr.as_u64 = pTxPkt->cbPhysAddr.as_u64 + copySize;
523         }
524 
525         /* update count of bytes in the copy buffer needed for DMA sync */
526         copySize += msgSize;
527     }
528 
529 _BnxeTxPktCopy_DMA_SYNC_COPY_BUFFER:
530 
531     if (copySize > 0)
532     {
533         /* DMA sync the copy buffer before sending */
534 
535         rc = ddi_dma_sync(pTxPkt->cbDmaHandle, 0, copySize,
536                           DDI_DMA_SYNC_FORDEV);
537 
538         if (pUM->fmCapabilities &&
539             BnxeCheckDmaHandle(pTxPkt->cbDmaHandle) != DDI_FM_OK)
540         {
541             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
542         }
543 
544         if (rc != DDI_SUCCESS)
545         {
546             BnxeLogWarn(pUM, "(%d) Failed to dma sync tx copy (%p / %d)",
547                         rc, pTxPkt, copySize);
548         }
549     }
550 
551     if (pTxPkt->num_handles == 0)
552     {
553         freemsg(pTxPkt->pMblk);
554         pTxPkt->pMblk = NULL;
555     }
556 
557     return 0;
558 }
559 
560 
561 /* this code is derived from that shown in RFC 1071 Section 4.1 */
BnxeCalcCksum(void * start,u32_t len,u16_t prev_sum)562 static inline u16_t BnxeCalcCksum(void * start,
563                                   u32_t  len,
564                                   u16_t  prev_sum)
565 {
566     u16_t * pword;
567     u32_t   sum = 0;
568 
569     pword = (u16_t *)start;
570 
571     for ( ; len > 1; len -= 2, pword++)
572     {
573         /* the inner loop */
574         sum += *pword;
575     }
576 
577     /* add left-over byte, if any */
578     if (len)
579     {
580         sum += (u16_t)(*((u8_t *)pword));
581     }
582 
583     sum += prev_sum;
584 
585     /* fold 32-bit sum to 16 bits */
586     while (sum >> 16)
587     {
588         sum = ((sum & 0xffff) + (sum >> 16));
589     }
590 
591     return (u16_t)sum;
592 }
593 
594 
595 /*
596  * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP checksums
597  * and does not know anything about the UDP header and where the checksum field
598  * is located.  It only knows about TCP.  Therefore we "lie" to the hardware for
599  * outgoing UDP packets w/ checksum offload.  Since the checksum field offset
600  * for TCP is 16 bytes and for UDP it is 6 bytes we pass a pointer to the
601  * hardware that is 10 bytes less than the start of the UDP header.  This allows
602  * the hardware to write the checksum in the correct spot.  But the hardware
603  * will compute a checksum which includes the last 10 bytes of the IP header.
604  * To correct this we tweak the stack computed pseudo checksum by folding in the
605  * calculation of the inverse checksum for those final 10 bytes of the IP
606  * header.  This allows the correct checksum to be computed by the hardware.
607  */
608 
609 #define TCP_CS_OFFSET           16
610 #define UDP_CS_OFFSET           6
611 #define UDP_TCP_CS_OFFSET_DIFF  (TCP_CS_OFFSET - UDP_CS_OFFSET)
612 
BnxeUdpPseudoCsum(um_device_t * pUM,u8_t * pUdpHdr,u8_t * pIpHdr,u8_t ipHdrLen)613 static inline u16_t BnxeUdpPseudoCsum(um_device_t * pUM,
614                                       u8_t *        pUdpHdr,
615                                       u8_t *        pIpHdr,
616                                       u8_t          ipHdrLen)
617 {
618     u32_t sum32;
619     u16_t sum16;
620     u16_t pseudo_cs;
621 
622     ASSERT(ipHdrLen >= UDP_TCP_CS_OFFSET_DIFF);
623 
624     /* calc cksum on last UDP_TCP_CS_OFFSET_DIFF bytes of ip header */
625     sum16 = BnxeCalcCksum(&pIpHdr[ipHdrLen - UDP_TCP_CS_OFFSET_DIFF],
626                           UDP_TCP_CS_OFFSET_DIFF, 0);
627 
628     /* substruct the calculated cksum from the udp pseudo cksum */
629     pseudo_cs = (*((u16_t *)&pUdpHdr[6]));
630     sum16     = ~sum16;
631     sum32     = (pseudo_cs + sum16);
632 
633     /* fold 32-bit sum to 16 bits */
634     while (sum32 >> 16)
635     {
636         sum32 = ((sum32 & 0xffff) + (sum32 >> 16));
637     }
638 
639     return ntohs((u16_t)sum32);
640 }
641 
642 
BnxeGetVlanTag(mblk_t * pMblk)643 static inline u16_t BnxeGetVlanTag(mblk_t * pMblk)
644 {
645     ASSERT(MBLKL(pMblk) >= sizeof(struct ether_vlan_header));
646     return GLD_VTAG_VID(ntohs(((struct ether_vlan_header *)pMblk->b_rptr)->ether_tci));
647 }
648 
649 
BnxeGetHdrInfo(um_device_t * pUM,um_txpacket_t * pTxPkt)650 static inline int BnxeGetHdrInfo(um_device_t *   pUM,
651                                  um_txpacket_t * pTxPkt)
652 {
653     mblk_t *      pMblk;
654     size_t        msgSize;
655     uint32_t      csStart;
656     uint32_t      csStuff;
657     uint32_t      csFlags;
658     uint32_t      lso;
659     u8_t *        pL2Hdr;
660     uint32_t      l2HdrLen;
661     u8_t *        pL3Hdr;
662     u32_t         l3HdrLen;
663     u8_t *        pL4Hdr;
664     u32_t         l4HdrLen;
665 
666     pMblk = pTxPkt->pMblk;
667     msgSize = MBLKL(pMblk);
668 
669     /* At least the MAC header... */
670 #if 0
671     if (msgSize < sizeof(struct ether_header))
672     {
673         BnxeLogWarn(pUM, "Invalid initial segment size in packet!");
674         return -1;
675     }
676 #else
677     ASSERT(msgSize >= sizeof(struct ether_header));
678 #endif
679 
680     mac_hcksum_get(pMblk, &csStart, &csStuff, NULL, NULL, &csFlags);
681 
682     lso = DB_LSOFLAGS(pMblk) & HW_LSO;
683 
684     /* get the Ethernet header */
685     pL2Hdr = (u8_t *)pMblk->b_rptr;
686 
687     /* grab the destination mac addr */
688     memcpy(pTxPkt->tx_info.dst_mac_addr, pL2Hdr, 6);
689 
690     if (lso)
691     {
692         pTxPkt->tx_info.flags |= LM_TX_FLAG_TCP_LSO_FRAME;
693 
694         pTxPkt->tx_info.lso_mss = (u16_t)DB_LSOMSS(pMblk);
695     }
696     else if (!csFlags)
697     {
698         /* no offload requested, just check for VLAN */
699 
700         if (((struct ether_header *)pMblk->b_rptr)->ether_type ==
701             htons(ETHERTYPE_VLAN))
702         {
703             pTxPkt->tx_info.vlan_tag = BnxeGetVlanTag(pMblk);
704             pTxPkt->tx_info.flags |= LM_TX_FLAG_VLAN_TAG_EXISTS;
705         }
706 
707         return 0;
708     }
709 
710     if (((struct ether_header *)pL2Hdr)->ether_type == htons(ETHERTYPE_VLAN))
711     {
712         l2HdrLen = sizeof(struct ether_vlan_header);
713 
714         pTxPkt->tx_info.vlan_tag = BnxeGetVlanTag(pMblk);
715         pTxPkt->tx_info.flags |= LM_TX_FLAG_VLAN_TAG_EXISTS;
716     }
717     else
718     {
719         l2HdrLen = sizeof(struct ether_header);
720     }
721 
722     if (csFlags & HCK_IPV4_HDRCKSUM)
723     {
724         pTxPkt->tx_info.flags |= LM_TX_FLAG_COMPUTE_IP_CKSUM;
725     }
726 
727     if (csFlags & HCK_PARTIALCKSUM)
728     {
729         pTxPkt->tx_info.flags |= LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM;
730 
731         l3HdrLen = csStart;
732         l4HdrLen = (l2HdrLen + csStuff + sizeof(u16_t));
733 
734         /*
735          * For TCP, here we ignore the urgent pointer and size of the
736          * options.  We'll get that info later.
737          */
738     }
739     else if (lso)
740     {
741         /* Solaris doesn't do LSO if there is option in the IP header. */
742         l3HdrLen = sizeof(struct ip);
743         l4HdrLen = (l2HdrLen + l3HdrLen + sizeof(struct tcphdr));
744     }
745     else
746     {
747         return 0;
748     }
749 
750     if (msgSize >= l4HdrLen)
751     {
752         /* the header is in the first block */
753         pL3Hdr = (pL2Hdr + l2HdrLen);
754     }
755     else
756     {
757         if ((msgSize <= l2HdrLen) && pMblk->b_cont &&
758             ((msgSize + MBLKL(pMblk->b_cont)) >= l4HdrLen))
759         {
760             /* the header is in the second block */
761             pL3Hdr = pMblk->b_cont->b_rptr + (l2HdrLen - msgSize);
762         }
763         else
764         {
765             /* do a pullup to make sure headers are in the first block */
766             pUM->txMsgPullUp++;
767 
768             if ((pMblk = msgpullup(pMblk, l4HdrLen)) == NULL)
769             {
770                 return -1;
771             }
772 
773             freemsg(pTxPkt->pMblk);
774             pTxPkt->pMblk = pMblk;
775 
776             pL3Hdr = (pMblk->b_rptr + l2HdrLen);
777         }
778     }
779 
780     /* must be IPv4 or IPv6 */
781     ASSERT((pL3Hdr[0] & 0xf0) == 0x60 || (pL3Hdr[0] & 0xf0) == 0x40);
782 
783     if ((pL3Hdr[0] & 0xf0) == 0x60)
784     {
785         pTxPkt->tx_info.flags |= LM_TX_FLAG_IPV6_PACKET;
786     }
787 
788     if (lso || ((csStuff - csStart) == TCP_CS_OFFSET))
789     {
790         /* get the TCP header */
791         pL4Hdr   = (pL3Hdr + l3HdrLen);
792         l4HdrLen = ((pL4Hdr[12] & 0xf0) >> 2);
793 
794         pTxPkt->tx_info.cs_any_offset     = 0;
795         pTxPkt->tx_info.tcp_nonce_sum_bit = (pL4Hdr[12] & 0x1);
796         pTxPkt->tx_info.tcp_pseudo_csum   = ntohs(*((u16_t *)&pL4Hdr[TCP_CS_OFFSET]));
797 
798         if (lso)
799         {
800             pTxPkt->tx_info.lso_ipid         = ntohs(*((u16_t *)&pL3Hdr[4]));
801             pTxPkt->tx_info.lso_tcp_send_seq = ntohl(*((u32_t *)&pL4Hdr[4]));
802             pTxPkt->tx_info.lso_tcp_flags    = pL4Hdr[13];
803         }
804     }
805     else
806     {
807         ASSERT((csStuff - csStart) == UDP_CS_OFFSET);
808 
809         /* get the UDP header */
810         pL4Hdr = pL3Hdr + l3HdrLen;
811 
812         l4HdrLen = sizeof(struct udphdr);
813 
814         pTxPkt->tx_info.cs_any_offset     = UDP_TCP_CS_OFFSET_DIFF;
815         pTxPkt->tx_info.tcp_nonce_sum_bit = 0;
816         pTxPkt->tx_info.tcp_pseudo_csum   =
817             CHIP_IS_E1x(((lm_device_t *)pUM)) ?
818                 BnxeUdpPseudoCsum(pUM, pL4Hdr, pL3Hdr, l3HdrLen) :
819                 ntohs(*((u16_t *)&pL4Hdr[UDP_CS_OFFSET]));
820     }
821 
822     pTxPkt->tx_info.lso_ip_hdr_len  = l3HdrLen;
823     pTxPkt->tx_info.lso_tcp_hdr_len = l4HdrLen;
824 
825     return 0;
826 }
827 
828 
BnxeTxSendMblk(um_device_t * pUM,int idx,mblk_t * pMblk,u32_t flags,u16_t vlan_tag)829 int BnxeTxSendMblk(um_device_t * pUM,
830                    int           idx,
831                    mblk_t *      pMblk,
832                    u32_t         flags,
833                    u16_t         vlan_tag)
834 {
835     lm_device_t *   pLM = &pUM->lm_dev;
836     TxQueue *       pTxQ = &pUM->txq[idx];
837     lm_tx_chain_t * pLmTxChain;
838     um_txpacket_t * pTxPkt;
839     s_list_t        tmpList;
840     u32_t           numPkts;
841     int rc;
842 
843     BNXE_LOCK_ENTER_FREETX(pUM, idx);
844 
845     pTxPkt = (um_txpacket_t *)s_list_pop_head(&pTxQ->freeTxDescQ);
846 
847     if (pTxQ->txLowWater > s_list_entry_cnt(&pTxQ->freeTxDescQ))
848     {
849         pTxQ->txLowWater = s_list_entry_cnt(&pTxQ->freeTxDescQ);
850     }
851 
852     BNXE_LOCK_EXIT_FREETX(pUM, idx);
853 
854     /* try to recycle if no more packet available */
855     if (pTxPkt == NULL)
856     {
857         pTxQ->txRecycle++;
858 
859         s_list_clear(&tmpList);
860 
861         BNXE_LOCK_ENTER_TX(pUM, idx);
862         numPkts = lm_get_packets_sent(pLM, idx, &tmpList);
863         BNXE_LOCK_EXIT_TX(pUM, idx);
864 
865         if (pUM->fmCapabilities &&
866             BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
867         {
868             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
869         }
870 
871         if (!numPkts)
872         {
873             atomic_or_32(&pTxQ->noTxCredits, BNXE_TX_RESOURCES_NO_DESC);
874             pTxQ->txBlocked++;
875             return BNXE_TX_HDWRFULL;
876         }
877 
878         /* steal the first packet from the list before reclaiming */
879 
880         pTxPkt = (um_txpacket_t *)s_list_pop_head(&tmpList);
881 
882         if (pTxPkt->num_handles)
883         {
884             BnxeTxPktUnmap(pTxPkt);
885         }
886 
887         if (pTxPkt->pMblk)
888         {
889             freemsg(pTxPkt->pMblk);
890             pTxPkt->pMblk = NULL;
891         }
892 
893         BnxeTxPktsReclaim(pUM, idx, &tmpList);
894     }
895 
896     pTxPkt->lm_pkt.link.next = NULL;
897 
898     pTxPkt->tx_info.flags    = 0;
899     pTxPkt->tx_info.vlan_tag = 0;
900     pTxPkt->frag_list.cnt    = 0;
901     pTxPkt->pMblk            = pMblk;
902 
903 #if 0
904     BnxeDumpPkt(pUM,
905                 (BNXE_FCOE(pUM) && (idx == FCOE_CID(&pUM->lm_dev))) ?
906                     "-> FCoE L2 TX ->" : "-> L2 TX ->",
907                 pMblk, B_TRUE);
908 #endif
909 
910     if (idx == FCOE_CID(pLM))
911     {
912         if (flags & PRV_TX_VLAN_TAG)
913         {
914             pTxPkt->tx_info.vlan_tag = vlan_tag;
915             pTxPkt->tx_info.flags |= LM_TX_FLAG_INSERT_VLAN_TAG;
916         }
917     }
918     else if (BnxeGetHdrInfo(pUM, pTxPkt))
919     {
920         goto BnxeTxSendMblk_fail;
921     }
922 
923     if (BnxeTxPktCopy(pUM, pTxQ, pTxPkt))
924     {
925         goto BnxeTxSendMblk_fail;
926     }
927 
928     /* Now try to send the packet... */
929 
930     pLmTxChain = &pLM->tx_info.chain[idx];
931 
932     BNXE_LOCK_ENTER_TX(pUM, idx);
933 
934     /* Try to reclaim sent packets if available BDs is lower than threshold */
935     if (pLmTxChain->bd_chain.bd_left < BNXE_MAX_DMA_FRAGS_PER_PKT + 2)
936     {
937         pTxQ->txRecycle++;
938 
939         s_list_clear(&tmpList);
940 
941         numPkts = lm_get_packets_sent(pLM, idx, &tmpList);
942 
943         if (pUM->fmCapabilities &&
944             BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
945         {
946             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
947         }
948 
949         if (numPkts)
950         {
951             BnxeTxPktsReclaim(pUM, idx, &tmpList);
952         }
953     }
954 
955     /*
956      * If there are no packets currently waiting to be sent and there are enough
957      * BDs available to satisfy this packet then send it now.
958      */
959     if (s_list_is_empty(&pTxQ->waitTxDescQ) &&
960         (pLmTxChain->bd_chain.bd_left >= pTxPkt->frag_list.cnt + 2))
961     {
962         rc = lm_send_packet(pLM, idx, &pTxPkt->lm_pkt, &pTxPkt->frag_list);
963 
964         if (pUM->fmCapabilities &&
965             BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
966         {
967             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
968         }
969 
970         if (pUM->fmCapabilities &&
971             BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_1]) != DDI_FM_OK)
972         {
973             ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED);
974         }
975 
976         if (rc == LM_STATUS_SUCCESS)
977         {
978             /* send completely successfully */
979             BNXE_LOCK_EXIT_TX(pUM, idx);
980             return BNXE_TX_GOODXMIT;
981         }
982 
983         /*
984          * Send failed (probably not enough BDs available)...
985          * Continue on with putting this packet on the wait queue.
986          */
987         pTxQ->txFailed++;
988     }
989 
990 #if 0
991     BnxeLogWarn(pUM, "WAIT TX DESCQ %lu %d %d",
992                 s_list_entry_cnt(&pTxQ->waitTxDescQ),
993                 pLmTxChain->bd_chain.bd_left, pTxPkt->frag_list.cnt);
994 #endif
995 
996     /*
997      * If we got here then there are other packets waiting to be sent or there
998      * aren't enough BDs available.  In either case put this packet at the end
999      * of the waiting queue.
1000      */
1001     s_list_push_tail(&pTxQ->waitTxDescQ, &pTxPkt->lm_pkt.link);
1002 
1003     pTxQ->txWait++;
1004 
1005     /*
1006      * If there appears to be a sufficient number of BDs available then make a
1007      * quick attempt to send as many waiting packets as possible.
1008      */
1009     if ((pLmTxChain->bd_chain.bd_left >= BNXE_MAX_DMA_FRAGS_PER_PKT) &&
1010         (BnxeTxSendWaitingPkt(pUM, idx) == BNXE_TX_GOODXMIT))
1011     {
1012         BNXE_LOCK_EXIT_TX(pUM, idx);
1013         return BNXE_TX_GOODXMIT;
1014     }
1015 
1016     /* Couldn't send anything! */
1017     atomic_or_32(&pTxQ->noTxCredits, BNXE_TX_RESOURCES_NO_CREDIT);
1018     pTxQ->txBlocked++;
1019 
1020     BNXE_LOCK_EXIT_TX(pUM, idx);
1021 
1022     return BNXE_TX_DEFERPKT;
1023 
1024 BnxeTxSendMblk_fail:
1025 
1026     pTxQ->txDiscards++;
1027 
1028     ASSERT(pTxPkt != NULL);
1029 
1030     if (pTxPkt->pMblk)
1031     {
1032         freemsg(pTxPkt->pMblk);
1033         pTxPkt->pMblk = NULL;
1034     }
1035 
1036     BNXE_LOCK_ENTER_FREETX(pUM, idx);
1037     s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
1038     BNXE_LOCK_EXIT_FREETX(pUM, idx);
1039 
1040     /*
1041      * Yes GOODXMIT since mblk was free'd here and this triggers caller to
1042      * try and send the next packet in its chain.
1043      */
1044     return BNXE_TX_GOODXMIT;
1045 }
1046 
1047 
BnxeTxPktsAbortIdx(um_device_t * pUM,int idx)1048 static void BnxeTxPktsAbortIdx(um_device_t * pUM,
1049                                int           idx)
1050 {
1051     s_list_t tmpList;
1052 
1053     BNXE_LOCK_ENTER_TX(pUM, idx);
1054     lm_abort(&pUM->lm_dev, ABORT_OP_INDICATE_TX_CHAIN, idx);
1055     tmpList = pUM->txq[idx].waitTxDescQ;
1056     s_list_clear(&pUM->txq[idx].waitTxDescQ);
1057     BNXE_LOCK_EXIT_TX(pUM, idx);
1058 
1059     BnxeTxPktsReclaim(pUM, idx, &tmpList);
1060 }
1061 
1062 
BnxeTxPktsAbort(um_device_t * pUM,int cliIdx)1063 void BnxeTxPktsAbort(um_device_t * pUM,
1064                      int           cliIdx)
1065 {
1066     int idx;
1067 
1068     switch (cliIdx)
1069     {
1070     case LM_CLI_IDX_FCOE:
1071 
1072         BnxeTxPktsAbortIdx(pUM, FCOE_CID(&pUM->lm_dev));
1073         break;
1074 
1075     case LM_CLI_IDX_NDIS:
1076 
1077         LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
1078         {
1079             BnxeTxPktsAbortIdx(pUM, idx);
1080         }
1081 
1082         break;
1083 
1084     default:
1085 
1086         BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsAbort (%d)", cliIdx);
1087         break;
1088     }
1089 }
1090 
1091 
BnxeTxPktAlloc(um_device_t * pUM,size_t size)1092 static um_txpacket_t * BnxeTxPktAlloc(um_device_t * pUM,
1093                                       size_t        size)
1094 {
1095     um_txpacket_t *   pTxPkt;
1096     ddi_dma_cookie_t  cookie;
1097     u32_t             count;
1098     size_t            length;
1099     int rc, j;
1100 
1101     if ((pTxPkt = kmem_zalloc(sizeof(um_txpacket_t), KM_NOSLEEP)) == NULL)
1102     {
1103         return NULL;
1104     }
1105 
1106     pTxPkt->lm_pkt.l2pkt_tx_info = &pTxPkt->tx_info;
1107 
1108     if ((rc = ddi_dma_alloc_handle(pUM->pDev,
1109                                    &bnxeTxCbDmaAttrib,
1110                                    DDI_DMA_DONTWAIT,
1111                                    NULL,
1112                                    &pTxPkt->cbDmaHandle)) != DDI_SUCCESS)
1113     {
1114         BnxeLogWarn(pUM, "Failed to alloc DMA handle for Tx Desc (%d)", rc);
1115         kmem_free(pTxPkt, sizeof(um_txpacket_t));
1116         return NULL;
1117     }
1118 
1119     if ((rc = ddi_dma_mem_alloc(pTxPkt->cbDmaHandle,
1120                                 size,
1121                                 &bnxeAccessAttribBUF,
1122                                 DDI_DMA_STREAMING,
1123                                 DDI_DMA_DONTWAIT,
1124                                 NULL,
1125                                 &pTxPkt->pCbBuf,
1126                                 &length,
1127                                 &pTxPkt->cbDmaAccHandle)) != DDI_SUCCESS)
1128     {
1129         BnxeLogWarn(pUM, "Failed to alloc DMA memory for Tx Desc (%d)", rc);
1130         ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
1131         kmem_free(pTxPkt, sizeof(um_txpacket_t));
1132         return NULL;
1133     }
1134 
1135     if ((rc = ddi_dma_addr_bind_handle(pTxPkt->cbDmaHandle,
1136                                        NULL,
1137                                        pTxPkt->pCbBuf,
1138                                        length,
1139                                        DDI_DMA_WRITE | DDI_DMA_STREAMING,
1140                                        DDI_DMA_DONTWAIT,
1141                                        NULL,
1142                                        &cookie,
1143                                        &count)) != DDI_DMA_MAPPED)
1144     {
1145         BnxeLogWarn(pUM, "Failed to bind DMA address for Tx Desc (%d)", rc);
1146         ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
1147         ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
1148         kmem_free(pTxPkt, sizeof(um_txpacket_t));
1149         return NULL;
1150     }
1151 
1152     pTxPkt->cbPhysAddr.as_u64 = cookie.dmac_laddress;
1153 
1154     for (j = 0; j < BNXE_MAX_DMA_HANDLES_PER_PKT; j++)
1155     {
1156         if ((rc = ddi_dma_alloc_handle(pUM->pDev,
1157                                        &bnxeTxDmaAttrib,
1158                                        DDI_DMA_DONTWAIT,
1159                                        NULL,
1160                                        &pTxPkt->dmaHandles[j])) !=
1161             DDI_SUCCESS)
1162         {
1163             BnxeLogWarn(pUM, "Failed to alloc DMA handles for Tx Pkt %d (%d)",
1164                         j, rc);
1165 
1166             for(--j; j >= 0; j--) /* unwind */
1167             {
1168                 ddi_dma_free_handle(&pTxPkt->dmaHandles[j]);
1169             }
1170 
1171             ddi_dma_unbind_handle(pTxPkt->cbDmaHandle);
1172             ddi_dma_mem_free(&pTxPkt->cbDmaAccHandle);
1173             ddi_dma_free_handle(&pTxPkt->cbDmaHandle);
1174             kmem_free(pTxPkt, sizeof(um_txpacket_t));
1175             return NULL;
1176         }
1177     }
1178 
1179     ASSERT(pTxPkt->pMblk == NULL);
1180     ASSERT(pTxPkt->num_handles == 0);
1181     ASSERT(pTxPkt->frag_list.cnt == 0);
1182     pTxPkt->cbLength = size;
1183 
1184     return pTxPkt;
1185 }
1186 
1187 
BnxeTxPktsInitIdx(um_device_t * pUM,int idx)1188 static int BnxeTxPktsInitIdx(um_device_t * pUM,
1189                              int           idx)
1190 {
1191     lm_device_t *   pLM = &pUM->lm_dev;
1192     TxQueue *       pTxQ;
1193     um_txpacket_t * pTxPkt;
1194     s_list_t        tmpList;
1195     int i;
1196 
1197     pTxQ = &pUM->txq[idx];
1198 
1199     s_list_clear(&pTxQ->sentTxQ);
1200     s_list_clear(&pTxQ->freeTxDescQ);
1201     s_list_clear(&pTxQ->waitTxDescQ);
1202 
1203     pTxQ->desc_cnt    = pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)];
1204     pTxQ->txLowWater  = pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)];
1205     pTxQ->thresh_pdwm = BNXE_PDWM_THRESHOLD;
1206     pTxQ->txFailed    = 0;
1207     pTxQ->txDiscards  = 0;
1208     pTxQ->txRecycle   = 0;
1209     pTxQ->txCopied    = 0;
1210     pTxQ->txBlocked   = 0;
1211     pTxQ->txWait      = 0;
1212 
1213     if (pUM->devParams.lsoEnable)
1214     {
1215         for (i = 0; i < pTxQ->desc_cnt; i++)
1216         {
1217             pTxPkt = BnxeTxPktAlloc(pUM,
1218                                     (BNXE_IP_MAXLEN +
1219                                      sizeof(struct ether_vlan_header)));
1220             if (pTxPkt == NULL)
1221             {
1222                 BnxeLogWarn(pUM, "Failed to allocate all Tx Descs for LSO (%d/%d allocated), LSO is disabled",
1223                             i, pTxQ->desc_cnt);
1224 
1225                 /* free existing in freeTxDescQ... */
1226 
1227                 BNXE_LOCK_ENTER_FREETX(pUM, idx);
1228                 tmpList = pTxQ->freeTxDescQ;
1229                 s_list_clear(&pTxQ->freeTxDescQ);
1230                 BNXE_LOCK_EXIT_FREETX(pUM, idx);
1231 
1232                 BnxeTxPktsFreeList(&tmpList);
1233 
1234                 pUM->devParams.lsoEnable = 0; /* Disabling LSO! */
1235 
1236                 break;
1237             }
1238 
1239             BNXE_LOCK_ENTER_FREETX(pUM, idx);
1240             s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
1241             BNXE_LOCK_EXIT_FREETX(pUM, idx);
1242         }
1243     }
1244 
1245     if (!pUM->devParams.lsoEnable)
1246     {
1247         for (i = 0; i < pTxQ->desc_cnt; i++)
1248         {
1249             pTxPkt = BnxeTxPktAlloc(pUM,
1250                                     (pUM->devParams.mtu[LM_CHAIN_IDX_CLI(pLM, idx)] +
1251                                      sizeof(struct ether_vlan_header)));
1252             if (pTxPkt == NULL)
1253             {
1254                 BnxeLogWarn(pUM, "Failed to allocate all Tx Descs (%d/%d allocated)",
1255                             i, pTxQ->desc_cnt);
1256 
1257                 /* free existing in freeTxDescQ... */
1258 
1259                 BNXE_LOCK_ENTER_FREETX(pUM, idx);
1260                 tmpList = pTxQ->freeTxDescQ;
1261                 s_list_clear(&pTxQ->freeTxDescQ);
1262                 BNXE_LOCK_EXIT_FREETX(pUM, idx);
1263 
1264                 BnxeTxPktsFreeList(&tmpList);
1265 
1266                 return -1;
1267             }
1268 
1269             BNXE_LOCK_ENTER_FREETX(pUM, idx);
1270             s_list_push_tail(&pTxQ->freeTxDescQ, &pTxPkt->lm_pkt.link);
1271             BNXE_LOCK_EXIT_FREETX(pUM, idx);
1272         }
1273     }
1274 
1275     return 0;
1276 }
1277 
1278 
BnxeTxPktsInit(um_device_t * pUM,int cliIdx)1279 int BnxeTxPktsInit(um_device_t * pUM,
1280                    int           cliIdx)
1281 {
1282     int idx, rc;
1283 
1284     switch (cliIdx)
1285     {
1286     case LM_CLI_IDX_FCOE:
1287 
1288         rc = BnxeTxPktsInitIdx(pUM, FCOE_CID(&pUM->lm_dev));
1289         break;
1290 
1291     case LM_CLI_IDX_NDIS:
1292 
1293         LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
1294         {
1295             if ((rc = BnxeTxPktsInitIdx(pUM, idx)) < 0)
1296             {
1297                 break;
1298             }
1299         }
1300 
1301         break;
1302 
1303     default:
1304 
1305         BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsFini (%d)", cliIdx);
1306         rc = -1;
1307         break;
1308     }
1309 
1310     return rc;
1311 }
1312 
1313 
BnxeTxPktsFiniIdx(um_device_t * pUM,int idx)1314 static void BnxeTxPktsFiniIdx(um_device_t * pUM,
1315                               int           idx)
1316 {
1317     lm_device_t * pLM = &pUM->lm_dev;
1318     TxQueue *     pTxQ;
1319     s_list_t      tmpList;
1320 
1321     pTxQ = &pUM->txq[idx];
1322 
1323     BNXE_LOCK_ENTER_FREETX(pUM, idx);
1324     tmpList = pTxQ->freeTxDescQ;
1325     s_list_clear(&pTxQ->freeTxDescQ);
1326     BNXE_LOCK_EXIT_FREETX(pUM, idx);
1327 
1328     BNXE_LOCK_ENTER_TX(pUM, idx);
1329     s_list_add_tail(&tmpList, &pTxQ->sentTxQ);
1330     s_list_clear(&pTxQ->sentTxQ);
1331     BNXE_LOCK_EXIT_TX(pUM, idx);
1332 
1333     /* there could be more than originally allocated but less is bad */
1334     if (s_list_entry_cnt(&tmpList) <
1335         pUM->devParams.numTxDesc[LM_CHAIN_IDX_CLI(pLM, idx)])
1336     {
1337         BnxeLogWarn(pUM, "Missing TX descriptors (%lu / %d) (TxFail: %d)",
1338                     s_list_entry_cnt(&tmpList), pUM->devParams.numTxDesc,
1339                     pTxQ->txFailed);
1340     }
1341 
1342     BnxeTxPktsFreeList(&tmpList);
1343 }
1344 
1345 
BnxeTxPktsFini(um_device_t * pUM,int cliIdx)1346 void BnxeTxPktsFini(um_device_t * pUM,
1347                     int           cliIdx)
1348 {
1349     int idx;
1350 
1351     switch (cliIdx)
1352     {
1353     case LM_CLI_IDX_FCOE:
1354 
1355         BnxeTxPktsFiniIdx(pUM, FCOE_CID(&pUM->lm_dev));
1356         break;
1357 
1358     case LM_CLI_IDX_NDIS:
1359 
1360         LM_FOREACH_TSS_IDX(&pUM->lm_dev, idx)
1361         {
1362             BnxeTxPktsFiniIdx(pUM, idx);
1363         }
1364 
1365         break;
1366 
1367     default:
1368 
1369         BnxeLogWarn(pUM, "ERROR: Invalid cliIdx for BnxeTxPktsFini (%d)", cliIdx);
1370         break;
1371     }
1372 }
1373 
1374