author    Sunil Goutham <sgoutham@cavium.com>    2016-08-30 11:36:27 +0530
committer David S. Miller <davem@davemloft.net>    2016-09-01 14:50:47 -0700
commit    7ceb8a1319ec64954459d474dd4a8c3c60ff0999 (patch)
tree      48fb486737e02a17d63e8e8f4a6395367d64231e /drivers/net/ethernet/cavium/thunder/nicvf_main.c
parent    57e81d44b0e1aa4dcb479ff8de8fc34cf635d0e8 (diff)
net: thunderx: Fix for issues with multiple CQEs posted for a TSO packet
On ThunderX 88xx pass 2.x chips, when TSO is offloaded to HW, HW posts a CQE for every TSO segment transmitted. Current code does handle this, but is prone to issues when segment sizes are small, resulting in SW processing too many CQEs and at times freeing an SKB which is not yet transmitted.

This patch handles the errata in a different way and eliminates the issues with the earlier approach. A TSO packet is submitted to HW with post_cqe=0, so that no CQE is posted upon completion of transmission of the TSO packet, but additional HDR + IMMEDIATE descriptors are added to the SQ, due to which a CQE is posted and will have the info required for cleanup in napi. This way only one CQE is posted for a TSO packet.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
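The scheme is easier to see as a small standalone model than in the diff hunks below. The following userspace sketch is illustrative only, not driver code: only the field names post_cqe, dont_send, subdesc_cnt and rsvd2 are taken from the patch, the ring itself is an assumption. It enqueues a TSO packet with post_cqe=0 and follows it with a dummy HDR + IMMEDIATE pair whose single CQE carries a back-pointer to the real descriptors.

/* Minimal userspace model of the one-CQE-per-TSO scheme described
 * above; not driver code. Field names post_cqe, dont_send,
 * subdesc_cnt and rsvd2 follow the diff below.
 */
#include <stdint.h>

#define SQ_SIZE 256

struct sq_hdr_model {
	uint8_t  post_cqe;    /* post a CQE when transmission completes */
	uint8_t  dont_send;   /* dummy descriptor, never hits the wire */
	uint16_t subdesc_cnt; /* subdescriptors that follow this header */
	uint16_t rsvd2;       /* back-pointer to the real TSO SQE */
};

static struct sq_hdr_model sq[SQ_SIZE];
static uint16_t sq_tail;

/* Enqueue a TSO packet: the real header asks HW not to post a CQE;
 * a dummy HDR + IMMEDIATE pair queued right after it is what posts
 * the single CQE, carrying enough state (rsvd2) for cleanup.
 */
static uint16_t sq_enqueue_tso(uint16_t data_subdescs)
{
	uint16_t tso_sqe = sq_tail;
	uint16_t dummy_sqe;

	sq[sq_tail] = (struct sq_hdr_model){
		.post_cqe = 0,            /* no CQE for the TSO SQE itself */
		.subdesc_cnt = data_subdescs,
	};
	sq_tail = (sq_tail + 1 + data_subdescs) % SQ_SIZE;

	dummy_sqe = sq_tail;
	sq[sq_tail] = (struct sq_hdr_model){
		.post_cqe = 1,            /* the single CQE comes from here */
		.dont_send = 1,           /* nothing is transmitted for it */
		.subdesc_cnt = 1,         /* one IMMEDIATE subdesc follows */
		.rsvd2 = tso_sqe,         /* where the real descriptors live */
	};
	sq_tail = (sq_tail + 2) % SQ_SIZE;
	return dummy_sqe;
}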
Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/nicvf_main.c')
-rw-r--r--    drivers/net/ethernet/cavium/thunder/nicvf_main.c    20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a19e73f11d73..3240349615bd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ }
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
- /* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+ /* In case of SW TSO on 88xx, only last segment will have
+ * a SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
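To connect the hunk above with the model after the commit message: on the one CQE that does arrive, dont_send marks it as the dummy descriptor and rsvd2 locates the real TSO SQE, so both runs of descriptors are freed in one pass. A hedged sketch of that bookkeeping, reusing the types from the earlier model (sq_put_desc stands in for nicvf_put_sq_desc):

/* Companion sketch to the model above: the single CQE points at the
 * dummy SQE; dont_send + rsvd2 let us release the real TSO
 * descriptors first, then the dummy HDR + IMMEDIATE pair itself.
 */
static uint16_t sq_free_cnt;

static void sq_put_desc(uint16_t cnt)
{
	sq_free_cnt += cnt; /* stand-in for nicvf_put_sq_desc() */
}

static void sq_complete(uint16_t sqe_ptr)
{
	struct sq_hdr_model *hdr = &sq[sqe_ptr];

	if (hdr->dont_send) {
		/* Dummy descriptor: free the actual TSO descriptors it
		 * points back to before freeing the dummy pair below.
		 */
		struct sq_hdr_model *tso = &sq[hdr->rsvd2];

		sq_put_desc(tso->subdesc_cnt + 1);
	}
	sq_put_desc(hdr->subdesc_cnt + 1);
}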
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+ u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
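nic->t88 by itself only records that this is 88xx silicon (PCI subsystem device ID 0xA134). Presumably the transmit path in nicvf_queues.c, which is outside this file's diff, applies the dummy-CQE workaround only when both flags set here are true; a minimal standalone sketch of such a gate, with is_tso standing in as an assumption for the driver's GSO check:

/* Hedged sketch, not the driver's code: gate the dummy-CQE TSO
 * workaround on 88xx silicon with HW TSO enabled.
 */
#include <stdbool.h>

struct nicvf_flags {
	bool t88;     /* 88xx silicon (subsystem device ID 0xA134) */
	bool hw_tso;  /* TSO offloaded to HW (pass 2.x and later) */
};

static bool tso_workaround_needed(const struct nicvf_flags *nic, bool is_tso)
{
	return nic->t88 && nic->hw_tso && is_tso;
}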