Diffstat (limited to 'drivers/net/ethernet')
222 files changed, 12162 insertions, 4415 deletions
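Three cleanups recur throughout the hunks below. First, driver-local "memory squeeze" / "low on mem" messages after a failed netdev_alloc_skb() or dma_alloc_coherent() are deleted, since the core allocation paths already log failures. Second, dma_alloc_coherent() + memset() pairs are collapsed by passing __GFP_ZERO. Third, module-init stubs of the form "if (IS_ERR(p)) return PTR_ERR(p); return 0;" become PTR_RET(p). A minimal before/after sketch of these patterns; the example_* names and the size parameter are illustrative, not taken from any driver below:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/string.h>

/* Before: driver-local OOM message plus explicit zeroing. */
static int example_alloc_old(struct device *kdev, size_t size,
			     void **ring, dma_addr_t *ring_dma)
{
	*ring = dma_alloc_coherent(kdev, size, ring_dma, GFP_KERNEL);
	if (!*ring) {
		dev_err(kdev, "could not allocate descriptor memory\n");
		return -ENOMEM;
	}
	memset(*ring, 0, size);
	return 0;
}

/* After: __GFP_ZERO hands back a zeroed buffer, and the allocator
 * itself complains on failure, so the driver stays silent. */
static int example_alloc_new(struct device *kdev, size_t size,
			     void **ring, dma_addr_t *ring_dma)
{
	*ring = dma_alloc_coherent(kdev, size, ring_dma,
				   GFP_KERNEL | __GFP_ZERO);
	return *ring ? 0 : -ENOMEM;
}

/* PTR_RET(), used by the module-init stubs below, behaves like: */
static inline int example_ptr_ret(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}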
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a175d0be1ae1..ee705771bd2c 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev) /* allocate a new skb for next time receive */ new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); - if (!new_skb) { - pr_notice("init: low on mem - packet dropped\n"); + if (!new_skb) goto init_error; - } + skb_reserve(new_skb, NET_IP_ALIGN); /* Invidate the data cache of skb->data range when it is write back * cache. It will prevent overwritting the new data from DMA @@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev) new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - netdev_notice(dev, "rx: low on mem - packet dropped\n"); dev->stats.rx_dropped++; goto out; } diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 0be2195e5034..269295403fc4 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev) } /* Allocate TX descriptor ring in coherent memory */ - greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, - 1024, - &greth->tx_bd_base_phys, - GFP_KERNEL); - + greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024, + &greth->tx_bd_base_phys, + GFP_KERNEL | __GFP_ZERO); if (!greth->tx_bd_base) { - if (netif_msg_probe(greth)) - dev_err(&dev->dev, "could not allocate descriptor memory.\n"); err = -ENOMEM; goto error3; } - memset(greth->tx_bd_base, 0, 1024); - /* Allocate RX descriptor ring in coherent memory */ - greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, - 1024, - &greth->rx_bd_base_phys, - GFP_KERNEL); - + greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024, + &greth->rx_bd_base_phys, + GFP_KERNEL | __GFP_ZERO); if (!greth->rx_bd_base) { - if (netif_msg_probe(greth)) - dev_err(greth->dev, "could not allocate descriptor memory.\n"); err = -ENOMEM; goto error4; } - memset(greth->rx_bd_base, 0, 1024); - /* Get MAC address from: module param, OF property or ID prom */ for (i = 0; i < 6; i++) { if (macaddr[i] != 0) diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 6e722dc37db7..65926a956575 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { - printk ("%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 3789affbc0e5..0866e7627433 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 60e2b701afe7..9793767996a2 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv) dev->stats.rx_packets++; } else { am_writeword (dev, 
hdraddr + 2, RMD_OWN); - printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 98f4522fd17b..c178eb4c8166 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - netdev_warn(dev, "Memory squeeze, deferring packet\n"); for (i = 0; i < RX_RING_SIZE; i++) if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) break; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 84219df72f51..e8d0ef508f48 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev ) else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); for( i = 0; i < RX_RING_SIZE; i++ ) if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & RMD1_OWN_CHIP) @@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev; static int __init atarilance_module_init(void) { atarilance_dev = atarilance_probe(-1); - if (IS_ERR(atarilance_dev)) - return PTR_ERR(atarilance_dev); - return 0; + return PTR_RET(atarilance_dev); } static void __exit atarilance_module_exit(void) diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index de774d419144..688aede742c7 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev) frmlen -= 4; /* Remove FCS */ skb = netdev_alloc_skb(dev, frmlen + 2); if (skb == NULL) { - netdev_err(dev, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; continue; } diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index baca0bd1b393..3d86ffeb4e15 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == 0) { - printk("%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; *rds_ptr(rd, mblength, lp->type) = 0; *rds_ptr(rd, rmd1, lp->type) = diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index 9af3c307862c..a51497c9d2af 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance; int __init init_module(void) { dev_mvme147_lance = mvme147lance_probe(-1); - if (IS_ERR(dev_mvme147_lance)) - return PTR_ERR(dev_mvme147_lance); - return 0; + return PTR_RET(dev_mvme147_lance); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 013b65108536..26fc0ce0faa3 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); int __init init_module(void) { dev_ni65 = ni65_probe(-1); - return IS_ERR(dev_ni65) ? 
PTR_ERR(dev_ni65) : 0; + return PTR_RET(dev_ni65); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 797f847edf13..ed2130727643 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev, skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN); if (skb == NULL) { - netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 74b3891b6483..4375abe61da1 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev ) else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", - dev->name )); - dev->stats.rx_dropped++; head->msg_length = 0; head->flag |= RMD1_OWN_CHIP; @@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev; int __init init_module(void) { sun3lance_dev = sun3lance_probe(-1); - if (IS_ERR(sun3lance_dev)) - return PTR_ERR(sun3lance_dev); - return 0; + return PTR_RET(sun3lance_dev); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 6a40290d3727..f47b780892e9 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; @@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev) skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); @@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op, dma_alloc_coherent(&op->dev, sizeof(struct lance_init_block), &lp->init_block_dvma, GFP_ATOMIC); - if (!lp->init_block_mem) { - printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n"); + if (!lp->init_block_mem) goto fail; - } + lp->pio_buffer = 0; lp->init_ring = lance_init_ring_dvma; lp->rx = lance_rx_dvma; diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index a206779c68cf..4ce8ceb62205 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev) /* Allocate the DMA ring buffers */ mp->tx_ring = dma_alloc_coherent(mp->device, - N_TX_RING * MACE_BUFF_SIZE, - &mp->tx_ring_phys, GFP_KERNEL); - if (mp->tx_ring == NULL) { - printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name); + N_TX_RING * MACE_BUFF_SIZE, + &mp->tx_ring_phys, GFP_KERNEL); + if (mp->tx_ring == NULL) goto out1; - } mp->rx_ring = dma_alloc_coherent(mp->device, - N_RX_RING * MACE_BUFF_SIZE, - &mp->rx_ring_phys, GFP_KERNEL); - if (mp->rx_ring == NULL) { - printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name); + N_RX_RING * MACE_BUFF_SIZE, + &mp->rx_ring_phys, GFP_KERNEL); + if (mp->rx_ring == NULL) goto out2; - } mace_dma_off(dev); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 
b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index f73d5609439a..7e0a822289c3 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK) - 4; /* CRC */ skb = netdev_alloc_skb_ip_align(netdev, packet_size); - if (skb == NULL) { - netdev_warn(netdev, - "Memory squeeze, deferring packet\n"); + if (skb == NULL) goto skip_pkt; - } + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); skb_put(skb, packet_size); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 5b0d9931c720..9948fee28ae5 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atl1_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 1278b47022e0..a046b6ff847c 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) /* alloc new buffer */ skb = netdev_alloc_skb_ip_align(netdev, rx_size); if (NULL == skb) { - printk(KERN_WARNING - "%s: Mem squeeze, deferring packet.\n", - netdev->name); /* * Check that some rx space is free. If not, * free one and mark stats->rx_dropped++. diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 7d81e059e811..0b3e23ec37f7 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, + GFP_KERNEL | __GFP_ZERO); if (!p) { - dev_err(kdev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; goto out_freeirq_tx; } - memset(p, 0, size); priv->rx_desc_alloc_size = size; priv->rx_desc_cpu = p; /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, + GFP_KERNEL | __GFP_ZERO); if (!p) { - dev_err(kdev, "cannot allocate tx ring\n"); ret = -ENOMEM; goto out_free_rx_ring; } - memset(p, 0, size); priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; @@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev) struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; struct mii_bus *bus; const char *clk_name; - unsigned int iomem_size; int i, ret; /* stop if shared driver failed, assume driver->probe will be @@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev) if (ret) goto out; - iomem_size = resource_size(res_mem); - if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { - ret = -EBUSY; - goto out; - } - - priv->base = ioremap(res_mem->start, iomem_size); + priv->base = devm_request_and_ioremap(&pdev->dev, res_mem); if (priv->base == NULL) { ret = -ENOMEM; - goto out_release_mem; + goto 
out; } + dev->irq = priv->irq = res_irq->start; priv->irq_rx = res_irq_rx->start; priv->irq_tx = res_irq_tx->start; @@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->mac_clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); - goto out_unmap; + goto out; } - clk_enable(priv->mac_clk); + clk_prepare_enable(priv->mac_clk); /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->phy_clk = NULL; goto out_put_clk_mac; } - clk_enable(priv->phy_clk); + clk_prepare_enable(priv->phy_clk); } /* do minimal hardware init to be able to probe mii bus */ @@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev) * if a slave is not present on hw */ bus->phy_mask = ~(1 << priv->phy_id); - bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, + GFP_KERNEL); if (!bus->irq) { ret = -ENOMEM; goto out_free_mdio; @@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev) return 0; out_unregister_mdio: - if (priv->mii_bus) { + if (priv->mii_bus) mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); - } out_free_mdio: if (priv->mii_bus) @@ -1807,19 +1798,13 @@ out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); if (priv->phy_clk) { - clk_disable(priv->phy_clk); + clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } out_put_clk_mac: - clk_disable(priv->mac_clk); + clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); - -out_unmap: - iounmap(priv->base); - -out_release_mem: - release_mem_region(res_mem->start, iomem_size); out: free_netdev(dev); return ret; @@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; - struct resource *res; /* stop netdevice */ dev = platform_get_drvdata(pdev); @@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev) if (priv->has_phy) { mdiobus_unregister(priv->mii_bus); - kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); } else { struct bcm63xx_enet_platform_data *pd; @@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev) bcm_enet_mdio_write_mii); } - /* release device resources */ - iounmap(priv->base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - /* disable hw block clocks */ if (priv->phy_clk) { - clk_disable(priv->phy_clk); + clk_disable_unprepare(priv->phy_clk); clk_put(priv->phy_clk); } - clk_disable(priv->mac_clk); + clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); platform_set_drvdata(pdev, NULL); @@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = { static int bcm_enet_shared_probe(struct platform_device *pdev) { struct resource *res; - unsigned int iomem_size; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; - iomem_size = resource_size(res); - if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) - return -EBUSY; - - bcm_enet_shared_base = ioremap(res->start, iomem_size); - if (!bcm_enet_shared_base) { - release_mem_region(res->start, iomem_size); + bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); + if (!bcm_enet_shared_base) return -ENOMEM; - } + return 0; } static int bcm_enet_shared_remove(struct platform_device *pdev) { - struct 
resource *res; - - iounmap(bcm_enet_shared_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); return 0; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index da5f4397f87c..eec0af45b859 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/mii.h> +#include <linux/phy.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <bcm47xx_nvram.h> @@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, /* Alloc skb */ slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); - if (!slot->skb) { - bgmac_err(bgmac, "Allocation of skb failed!\n"); + if (!slot->skb) return -ENOMEM; - } /* Poison - if everything goes fine, hardware will overwrite it */ rx = (struct bgmac_rx_header *)slot->skb->data; @@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = { }; /************************************************** + * MII + **************************************************/ + +static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return bgmac_phy_read(bus->priv, mii_id, regnum); +} + +static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + return bgmac_phy_write(bus->priv, mii_id, regnum, value); +} + +static int bgmac_mii_register(struct bgmac *bgmac) +{ + struct mii_bus *mii_bus; + int i, err = 0; + + mii_bus = mdiobus_alloc(); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "bgmac mii bus"; + sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num, + bgmac->core->core_unit); + mii_bus->priv = bgmac; + mii_bus->read = bgmac_mii_read; + mii_bus->write = bgmac_mii_write; + mii_bus->parent = &bgmac->core->dev; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); + + mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!mii_bus->irq) { + err = -ENOMEM; + goto err_free_bus; + } + for (i = 0; i < PHY_MAX_ADDR; i++) + mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(mii_bus); + if (err) { + bgmac_err(bgmac, "Registration of mii bus failed\n"); + goto err_free_irq; + } + + bgmac->mii_bus = mii_bus; + + return err; + +err_free_irq: + kfree(mii_bus->irq); +err_free_bus: + mdiobus_free(mii_bus); + return err; +} + +static void bgmac_mii_unregister(struct bgmac *bgmac) +{ + struct mii_bus *mii_bus = bgmac->mii_bus; + + mdiobus_unregister(mii_bus); + kfree(mii_bus->irq); + mdiobus_free(mii_bus); +} + +/************************************************** * BCMA bus ops **************************************************/ @@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core) if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + err = bgmac_mii_register(bgmac); + if (err) { + bgmac_err(bgmac, "Cannot register MDIO\n"); + err = -ENOTSUPP; + goto err_dma_free; + } + err = register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); err = -ENOTSUPP; - goto err_dma_free; + goto err_mii_unregister; } netif_carrier_off(net_dev); @@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core) return 0; +err_mii_unregister: + bgmac_mii_unregister(bgmac); err_dma_free: bgmac_dma_free(bgmac); @@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core) netif_napi_del(&bgmac->napi); 
unregister_netdev(bgmac->net_dev); + bgmac_mii_unregister(bgmac); bgmac_dma_free(bgmac); bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4ede614c81f8..98d4b5fcc070 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -399,6 +399,7 @@ struct bgmac { struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ struct net_device *net_dev; struct napi_struct napi; + struct mii_bus *mii_bus; /* DMA */ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2f0ba8f2fd6c..e709296e3b85 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp) sizeof(struct statistics_block); status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, GFP_KERNEL); + &bp->status_blk_mapping, + GFP_KERNEL | __GFP_ZERO); if (status_blk == NULL) goto alloc_mem_err; - memset(status_blk, 0, bp->status_stats_size); - bnapi = &bp->bnx2_napi[0]; bnapi->status_blk.msi = status_blk; bnapi->hw_tx_cons_ptr = diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e4605a965084..c6303428f9e9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t { struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ -#define BNX2X_NAPI_WEIGHT 128 struct napi_struct napi; union host_hc_status_block status_blk; /* chip independed shortcuts into sb structure */ @@ -613,9 +612,10 @@ struct bnx2x_fastpath { * START_BD - describes packed * START_BD(splitted) - includes unpaged data segment for GSO * PARSING_BD - for TSO and CSUM data + * PARSING_BD2 - for encapsulation data * Frag BDs - decribes pages for frags */ -#define BDS_PER_TX_PKT 3 +#define BDS_PER_TX_PKT 4 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) /* max BDs per tx packet including next pages */ #define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \ @@ -730,18 +730,24 @@ struct bnx2x_fastpath { #define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ skb->csum_offset)) -#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) +#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff) -#define XMIT_PLAIN 0 -#define XMIT_CSUM_V4 0x1 -#define XMIT_CSUM_V6 0x2 -#define XMIT_CSUM_TCP 0x4 -#define XMIT_GSO_V4 0x8 -#define XMIT_GSO_V6 0x10 +#define XMIT_PLAIN 0 +#define XMIT_CSUM_V4 (1 << 0) +#define XMIT_CSUM_V6 (1 << 1) +#define XMIT_CSUM_TCP (1 << 2) +#define XMIT_GSO_V4 (1 << 3) +#define XMIT_GSO_V6 (1 << 4) +#define XMIT_CSUM_ENC_V4 (1 << 5) +#define XMIT_CSUM_ENC_V6 (1 << 6) +#define XMIT_GSO_ENC_V4 (1 << 7) +#define XMIT_GSO_ENC_V6 (1 << 8) -#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) -#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) +#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6) +#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6) +#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC) +#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC) /* stuff added to make the code fit 80Col */ #define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) @@ -1215,14 +1221,16 @@ enum { BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, + 
BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; struct bnx2x_prev_path_list { + struct list_head list; u8 bus; u8 slot; u8 path; - struct list_head list; + u8 aer; u8 undi; }; @@ -1269,6 +1277,8 @@ struct bnx2x { #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) #ifdef CONFIG_BNX2X_SRIOV + /* protects vf2pf mailbox from simultaneous access */ + struct mutex vf2pf_mutex; /* vf pf channel mailbox contains request and response buffers */ struct bnx2x_vf_mbx_msg *vf2pf_mbox; dma_addr_t vf2pf_mbox_mapping; @@ -1281,6 +1291,8 @@ struct bnx2x { dma_addr_t pf2vf_bulletin_mapping; struct pf_vf_bulletin_content old_bulletin; + + u16 requested_nr_virtfn; #endif /* CONFIG_BNX2X_SRIOV */ struct net_device *dev; @@ -1944,12 +1956,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf); -#define BNX2X_ILT_ZALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x) \ - memset(x, 0, size); \ - } while (0) +#define BNX2X_ILT_ZALLOC(x, y, size) \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ + GFP_KERNEL | __GFP_ZERO) #define BNX2X_ILT_FREE(x, y, size) \ do { \ @@ -2286,7 +2295,7 @@ static const u32 dmae_reg_go_c[] = { DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 }; -void bnx2x_set_ethtool_ops(struct net_device *netdev); +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); #define BNX2X_MF_SD_PROTOCOL(bp) \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4046f97378c2..352e58ede4d5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, * Compute number of aggregated segments, and gso_type. */ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, - u16 len_on_bd, unsigned int pkt_len) + u16 len_on_bd, unsigned int pkt_len, + u16 num_of_coalesced_segs) { /* TPA aggregation won't have either IP options or TCP options * other than timestamp or IPv6 extension headers. @@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count * to skb_shinfo(skb)->gso_segs */ - NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len, - skb_shinfo(skb)->gso_size); + NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; } static int bnx2x_alloc_rx_sge(struct bnx2x *bp, @@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* This is needed in order to enable forwarding support */ if (frag_size) bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, - le16_to_cpu(cqe->pkt_len)); + le16_to_cpu(cqe->pkt_len), + le16_to_cpu(cqe->num_of_coalesced_segs)); #ifdef BNX2X_STOP_ON_ERROR if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { @@ -2009,7 +2010,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) * Cleans the object that have internal lists without sending * ramrods. Should be run when interrutps are disabled. */ -static void bnx2x_squeeze_objects(struct bnx2x *bp) +void bnx2x_squeeze_objects(struct bnx2x *bp) { int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; @@ -2774,7 +2775,7 @@ load_error0: #endif /* ! 
BNX2X_STOP_ON_ERROR */ } -static int bnx2x_drain_tx_queues(struct bnx2x *bp) +int bnx2x_drain_tx_queues(struct bnx2x *bp) { u8 rc = 0, cos, i; @@ -3086,11 +3087,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget) * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs */ -static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, - struct sw_tx_bd *tx_buf, - struct eth_tx_start_bd **tx_bd, u16 hlen, - u16 bd_prod, int nbd) +static u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod) { struct eth_tx_start_bd *h_tx_bd = *tx_bd; struct eth_tx_bd *d_tx_bd; @@ -3098,11 +3099,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, int old_len = le16_to_cpu(h_tx_bd->nbytes); /* first fix first BD */ - h_tx_bd->nbd = cpu_to_le16(nbd); h_tx_bd->nbytes = cpu_to_le16(hlen); - DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n", - h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd); + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", + h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); /* now get a new data BD * (after the pbd) and fill it */ @@ -3131,7 +3131,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) -static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) { __sum16 tsum = (__force __sum16) csum; @@ -3146,30 +3146,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) return bswab16(tsum); } -static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) { u32 rc; + __u8 prot = 0; + __be16 protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) - rc = XMIT_PLAIN; + return XMIT_PLAIN; - else { - if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { - rc = XMIT_CSUM_V6; - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - rc |= XMIT_CSUM_TCP; + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + prot = ipv6_hdr(skb)->nexthdr; + } else { + rc = XMIT_CSUM_V4; + prot = ip_hdr(skb)->protocol; + } + if (!CHIP_IS_E1x(bp) && skb->encapsulation) { + if (inner_ip_hdr(skb)->version == 6) { + rc |= XMIT_CSUM_ENC_V6; + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; } else { - rc = XMIT_CSUM_V4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_ENC_V4; + if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) rc |= XMIT_CSUM_TCP; } } + if (prot == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; - if (skb_is_gso_v6(skb)) - rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; - else if (skb_is_gso(skb)) - rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; + if (skb_is_gso_v6(skb)) { + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V6; + } else if (skb_is_gso(skb)) { + rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V4; + } return rc; } @@ -3254,14 +3271,23 @@ exit_lbl: } #endif -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) +static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) { + struct ipv6hdr *ipv6; + 
*parsing_data |= (skb_shinfo(skb)->gso_size << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS; - if ((xmit_type & XMIT_GSO_V6) && - (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else if (xmit_type & XMIT_GSO_V6) + ipv6 = ipv6_hdr(skb); + else + ipv6 = NULL; + + if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; } @@ -3272,13 +3298,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, * @pbd: parse BD * @xmit_type: xmit flags */ -static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) +static void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); - pbd->tcp_flags = pbd_tcp_flags(skb); + pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); if (xmit_type & XMIT_GSO_V4) { pbd->ip_id = bswab16(ip_hdr(skb)->id); @@ -3298,6 +3324,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, } /** + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712/578xx related, when skb has encapsulation + */ +static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data; + } + + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. 
+ */ + return skb_inner_transport_header(skb) + + sizeof(struct udphdr) - skb->data; +} + +/** * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length * * @bp: driver handle @@ -3305,15 +3365,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, * @parsing_data: data to be updated * @xmit_type: xmit flags * - * 57712 related + * 57712/578xx related */ -static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - u32 *parsing_data, u32 xmit_type) +static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) { *parsing_data |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; if (xmit_type & XMIT_CSUM_TCP) { *parsing_data |= ((tcp_hdrlen(skb) / 4) << @@ -3328,17 +3388,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; } -static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) +/* set FW indication according to inner or outer protocols if tunneled */ +static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, + u32 xmit_type) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; + if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; if (!(xmit_type & XMIT_CSUM_TCP)) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; @@ -3352,9 +3410,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, * @pbd: parse BD to be updated * @xmit_type: xmit flags */ -static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_parse_bd_e1x *pbd, - u32 xmit_type) +static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) { u8 hlen = (skb_network_header(skb) - skb->data) >> 1; @@ -3400,6 +3458,70 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, return hlen; } +static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, + struct eth_tx_parse_bd_e2 *pbd_e2, + struct eth_tx_parse_2nd_bd *pbd2, + u16 *global_data, + u32 xmit_type) +{ + u16 hlen_w = 0; + u8 outerip_off, outerip_len = 0; + /* from outer IP to transport */ + hlen_w = (skb_inner_transport_header(skb) - + skb_network_header(skb)) >> 1; + + /* transport len */ + if (xmit_type & XMIT_CSUM_TCP) + hlen_w += inner_tcp_hdrlen(skb) >> 1; + else + hlen_w += sizeof(struct udphdr) >> 1; + + pbd2->fw_ip_hdr_to_payload_w = hlen_w; + + if (xmit_type & XMIT_CSUM_ENC_V4) { + struct iphdr *iph = ip_hdr(skb); + pbd2->fw_ip_csum_wo_len_flags_frag = + bswab16(csum_fold((~iph->check) - + iph->tot_len - iph->frag_off)); + } else { + pbd2->fw_ip_hdr_to_payload_w = + hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + } + + pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); + + pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); + + if (xmit_type & XMIT_GSO_V4) { + pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); + + pbd_e2->data.tunnel_data.pseudo_csum = + 
bswab16(~csum_tcpudp_magic( + inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + + outerip_len = ip_hdr(skb)->ihl << 1; + } else { + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_ipv6_magic( + &inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + outerip_off = (skb_network_header(skb) - skb->data) >> 1; + + *global_data |= + outerip_off | + (!!(xmit_type & XMIT_CSUM_V6) << + ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | + (outerip_len << + ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); +} + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@ -3415,6 +3537,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + struct eth_tx_parse_2nd_bd *pbd2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; int nbd, txq_index; @@ -3482,7 +3605,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) mac_type = MULTICAST_ADDRESS; } -#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) /* First, check if we need to linearize the skb (due to FW restrictions). No need to check fragmentation if page size > 8K (there will be no violation to FW restrictions) */ @@ -3530,12 +3653,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) first_bd = tx_start_bd; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_PARSE_NBDS, - 0); - /* header nbd */ - SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); + /* header nbd: indirectly zero other flags! 
*/ + tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; /* remember the first BD of the packet */ tx_buf->first_bd = txdata->tx_bd_prod; @@ -3555,19 +3675,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ -#ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) { -#endif + if (IS_VF(bp)) tx_start_bd->vlan_or_ethertype = cpu_to_le16(ntohs(eth->h_proto)); -#ifndef BNX2X_STOP_ON_ERROR - } else { + else /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); - } -#endif } + nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ + /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); @@ -3577,23 +3694,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!CHIP_IS_E1x(bp)) { pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); - /* Set PBD in checksum offload case */ - if (xmit_type & XMIT_CSUM) + + if (xmit_type & XMIT_CSUM_ENC) { + u16 global_data = 0; + + /* Set PBD in enc checksum offload case */ + hlen = bnx2x_set_pbd_csum_enc(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + + /* turn on 2nd parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; + + memset(pbd2, 0, sizeof(*pbd2)); + + pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = + (skb_inner_network_header(skb) - + skb->data) >> 1; + + if (xmit_type & XMIT_GSO_ENC) + bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, + &global_data, + xmit_type); + + pbd2->global_data = cpu_to_le16(global_data); + + /* add addition parse BD indication to start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, 1); + /* set encapsulation flag in start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_TUNNEL_EXIST, 1); + nbd++; + } else if (xmit_type & XMIT_CSUM) { + /* Set PBD in checksum offload case w/o encapsulation */ hlen = bnx2x_set_pbd_csum_e2(bp, skb, &pbd_e2_parsing_data, xmit_type); + } - if (IS_MF_SI(bp) || IS_VF(bp)) { - /* fill in the MAC addresses in the PBD - for local - * switching - */ - bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, - &pbd_e2->src_mac_addr_mid, - &pbd_e2->src_mac_addr_lo, + /* Add the macs to the parsing BD this is a vf */ + if (IS_VF(bp)) { + /* override GRE parameters in BD */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, eth->h_source); - bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, - &pbd_e2->dst_mac_addr_mid, - &pbd_e2->dst_mac_addr_lo, + + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); } @@ -3615,14 +3767,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup the data pointer of the first BD of the packet */ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); pkt_size = tx_start_bd->nbytes; DP(NETIF_MSG_TX_QUEUED, - "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n", + "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n", tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, - le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), + 
le16_to_cpu(tx_start_bd->nbytes), tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan_or_ethertype)); @@ -3635,10 +3786,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; - if (unlikely(skb_headlen(skb) > hlen)) + if (unlikely(skb_headlen(skb) > hlen)) { + nbd++; bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, &tx_start_bd, hlen, - bd_prod, ++nbd); + bd_prod); + } if (!CHIP_IS_E1x(bp)) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); @@ -3728,9 +3881,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (pbd_e2) DP(NETIF_MSG_TX_QUEUED, "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", - pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, - pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, - pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, + pbd_e2, + pbd_e2->data.mac_addr.dst_hi, + pbd_e2->data.mac_addr.dst_mid, + pbd_e2->data.mac_addr.dst_lo, + pbd_e2->data.mac_addr.src_hi, + pbd_e2->data.mac_addr.src_mid, + pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index aee7671ff4c1..54e1b149acb3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -50,13 +50,13 @@ extern int int_mode; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ - do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ - if (x == NULL) \ - goto alloc_mem_err; \ - memset((void *)x, 0, size); \ - } while (0) +#define BNX2X_PCI_ALLOC(x, y, size) \ +do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ + GFP_KERNEL | __GFP_ZERO); \ + if (x == NULL) \ + goto alloc_mem_err; \ +} while (0) #define BNX2X_ALLOC(x, size) \ do { \ @@ -496,7 +496,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int bnx2x_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi); int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); +int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); @@ -834,7 +837,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) /* Add NAPI objects */ for_each_rx_queue_cnic(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); + bnx2x_poll, NAPI_POLL_WEIGHT); } static inline void bnx2x_add_all_napi(struct bnx2x *bp) @@ -844,7 +847,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) /* Add NAPI objects */ for_each_eth_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), - bnx2x_poll, BNX2X_NAPI_WEIGHT); + bnx2x_poll, NAPI_POLL_WEIGHT); } static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) @@ -970,6 +973,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp) else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; + start_params->gre_tunnel_mode = IPGRE_TUNNEL; + start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; + return bnx2x_func_state_change(bp, &func_params); } @@ -1396,4 +1402,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) * */ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); + +int bnx2x_drain_tx_queues(struct 
bnx2x *bp); +void bnx2x_squeeze_objects(struct bnx2x *bp); + #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index edfa67adf2f9..129d6b21317c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1393,10 +1393,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev, u8 *data) { struct bnx2x *bp = netdev_priv(dev); - int rc = 0, phy_idx; + int rc = -EINVAL, phy_idx; u8 *user_data = data; - int remaining_len = ee->len, xfer_size; - unsigned int page_off = ee->offset; + unsigned int start_addr = ee->offset, xfer_size = 0; if (!netif_running(dev)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, @@ -1405,21 +1404,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev, } phy_idx = bnx2x_get_cur_phy_idx(bp); - bnx2x_acquire_phy_lock(bp); - while (!rc && remaining_len > 0) { - xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ? - SFP_EEPROM_PAGE_SIZE : remaining_len; + + /* Read A0 section */ + if (start_addr < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN) + xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr; + else + xfer_size = ee->len; + bnx2x_acquire_phy_lock(bp); rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], &bp->link_params, - page_off, + I2C_DEV_ADDR_A0, + start_addr, xfer_size, user_data); - remaining_len -= xfer_size; + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n"); + + return -EINVAL; + } user_data += xfer_size; - page_off += xfer_size; + start_addr += xfer_size; } - bnx2x_release_phy_lock(bp); + /* Read A2 section */ + if ((start_addr >= ETH_MODULE_SFF_8079_LEN) && + (start_addr < ETH_MODULE_SFF_8472_LEN)) { + xfer_size = ee->len - xfer_size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN) + xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A2, + start_addr, + xfer_size, + user_data); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n"); + return -EINVAL; + } + } return rc; } @@ -1427,24 +1457,50 @@ static int bnx2x_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct bnx2x *bp = netdev_priv(dev); - int phy_idx; + int phy_idx, rc; + u8 sff8472_comp, diag_type; + if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; } - phy_idx = bnx2x_get_cur_phy_idx(bp); - switch (bp->link_params.phy[phy_idx].media_type) { - case ETH_PHY_SFPP_10G_FIBER: - case ETH_PHY_SFP_1G_FIBER: - case ETH_PHY_DA_TWINAX: + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_SFF_8472_COMP_ADDR, + SFP_EEPROM_SFF_8472_COMP_SIZE, + &sff8472_comp); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n"); + return -EINVAL; + } + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_DIAG_TYPE_ADDR, + SFP_EEPROM_DIAG_TYPE_SIZE, + 
&diag_type); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n"); + return -EINVAL; + } + + if (!sff8472_comp || + (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) { modinfo->type = ETH_MODULE_SFF_8079; modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; - default: - return -EOPNOTSUPP; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; } + return 0; } static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, @@ -3232,7 +3288,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; -void bnx2x_set_ethtool_ops(struct net_device *netdev) +static const struct ethtool_ops bnx2x_vf_ethtool_ops = { + .get_settings = bnx2x_get_settings, + .set_settings = bnx2x_set_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = bnx2x_set_msglevel, + .get_link = bnx2x_get_link, + .get_coalesce = bnx2x_get_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_sset_count = bnx2x_get_sset_count, + .get_strings = bnx2x_get_strings, + .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .set_rxnfc = bnx2x_set_rxnfc, + .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, + .get_rxfh_indir = bnx2x_get_rxfh_indir, + .set_rxfh_indir = bnx2x_set_rxfh_indir, + .get_channels = bnx2x_get_channels, + .set_channels = bnx2x_set_channels, +}; + +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) { - SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); + if (IS_PF(bp)) + SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); + else /* vf */ + SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index e5f808377c91..40f22c6794cd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -30,31 +30,31 @@ * IRO[138].m2) + ((sbId) * IRO[138].m3)) #define CSTORM_IGU_MODE_OFFSET (IRO[157].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) -#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ (IRO[317].base + ((pfId) * IRO[317].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[318].base + ((pfId) * IRO[318].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) + (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) + (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) + (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) + (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) + (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ - (IRO[314].base + ((pfId) * IRO[314].m1) 
+ ((iscsiEqId) * IRO[314].m2)) + (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) + (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[315].base + ((pfId) * IRO[315].m1)) + (IRO[316].base + ((pfId) * IRO[316].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[307].base + ((pfId) * IRO[307].m1)) + (IRO[308].base + ((pfId) * IRO[308].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) + (IRO[307].base + ((pfId) * IRO[307].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) + (IRO[306].base + ((pfId) * IRO[306].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ (IRO[151].base + ((funcId) * IRO[151].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ @@ -114,7 +114,7 @@ #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ (IRO[268].base + ((pfId) * IRO[268].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[277].base + ((pfId) * IRO[277].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ (IRO[264].base + ((pfId) * IRO[264].m1)) #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ @@ -136,35 +136,32 @@ #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[176].base + ((assertListEntry) * IRO[176].m1)) -#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ - (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ - IRO[205].m2)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ (IRO[183].base + ((portId) * IRO[183].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[318].base + ((pfId) * IRO[318].m1)) + (IRO[319].base + ((pfId) * IRO[319].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ (IRO[178].base + ((funcId) * IRO[178].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) -#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ (IRO[283].base + ((pfId) * IRO[283].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * IRO[284].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[280].base + ((pfId) * IRO[280].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[282].base + ((pfId) * IRO[282].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) -#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ (IRO[286].base + ((pfId) * IRO[286].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ (IRO[182].base + ((pfId) * IRO[182].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -190,39 +187,39 @@ #define 
XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ (IRO[299].base + ((pfId) * IRO[299].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ (IRO[300].base + ((pfId) * IRO[300].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ (IRO[301].base + ((pfId) * IRO[301].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ (IRO[302].base + ((pfId) * IRO[302].m1)) -#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ (IRO[303].base + ((pfId) * IRO[303].m1)) -#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ (IRO[304].base + ((pfId) * IRO[304].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 037860ecc343..12f00a40cdf0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -114,6 +114,10 @@ struct license_key { #define EPIO_CFG_EPIO30 0x0000001f #define EPIO_CFG_EPIO31 0x00000020 +struct mac_addr { + u32 upper; + u32 lower; +}; struct shared_hw_cfg { /* NVRAM Offset */ /* Up to 16 bytes of NULL-terminated string */ @@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 - u32 reserved0[6]; /* 0x178 */ + /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2 + * LOM recommended and tested value is 0xBEB2. 
Using a different + * value means using a value not tested by BRCM + */ + u32 sfi_tap_values; /* 0x178 */ + #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF + #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0 + + /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested + * value is 0x2. LOM recommended and tested value is 0x2. Using a + * different value means using a value not tested by BRCM + */ + #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 + #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + + u32 reserved0[5]; /* 0x17c */ u32 aeu_int_mask; /* 0x190 */ @@ -2821,8 +2840,8 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 2 -#define BCM_5710_FW_ENGINEERING_VERSION 0 +#define BCM_5710_FW_REVISION_VERSION 17 +#define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3513,11 +3532,14 @@ struct client_init_tx_data { #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 -#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) -#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 +#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) +#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4 u8 default_vlan_flg; u8 force_default_pri_flg; - __le32 reserved3; + u8 tunnel_lso_inc_ip_id; + u8 refuse_outband_vlan_flg; + u8 tunnel_non_lso_pcsum_location; + u8 reserved1; }; /* @@ -3551,6 +3573,11 @@ struct client_update_ramrod_data { __le16 silent_vlan_mask; u8 silent_vlan_removal_flg; u8 silent_vlan_change_flg; + u8 refuse_outband_vlan_flg; + u8 refuse_outband_vlan_change_flg; + u8 tx_switching_flg; + u8 tx_switching_change_flg; + __le32 reserved1; __le32 echo; }; @@ -3620,7 +3647,8 @@ struct eth_classify_header { */ struct eth_classify_mac_cmd { struct eth_classify_cmd_header header; - __le32 reserved0; + __le16 reserved0; + __le16 inner_mac; __le16 mac_lsb; __le16 mac_mid; __le16 mac_msb; @@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd { */ struct eth_classify_pair_cmd { struct eth_classify_cmd_header header; - __le32 reserved0; + __le16 reserved0; + __le16 inner_mac; __le16 mac_lsb; __le16 mac_mid; __le16 mac_msb; @@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data { /* - * Command for setting multicast classification for a client + * destination and source mac address. + */ +struct eth_mac_addresses { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_lo; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 src_lo; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_hi; + __le16 src_mid; +#elif defined(__LITTLE_ENDIAN) + __le16 src_mid; + __le16 src_hi; +#endif +}; + +/* tunneling related data */ +struct eth_tunnel_data { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 reserved0; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 reserved0; +#endif +#if defined(__BIG_ENDIAN) + u8 reserved1; + u8 ip_hdr_start_inner_w; + __le16 pseudo_csum; +#elif defined(__LITTLE_ENDIAN) + __le16 pseudo_csum; + u8 ip_hdr_start_inner_w; + u8 reserved1; +#endif +}; + +/* union for mac addresses and for tunneling data. + * considered as tunneling data only if (tunnel_exist == 1). 
*/ +union eth_mac_addr_or_tunnel_data { + struct eth_mac_addresses mac_addr; + struct eth_tunnel_data tunnel_data; +}; + +/*Command for setting multicast classification for a client */ struct eth_multicast_rules_cmd { u8 cmd_general_data; #define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) @@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd { struct regpair reserved3; }; - /* * parameters for multicast classification ramrod */ @@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data { struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; }; - /* * Place holder for ramrods protocol specific data */ @@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data { #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 u8 rss_result_mask; u8 rss_mode; - __le32 __reserved2; + __le16 udp_4tuple_dst_port_mask; + __le16 udp_4tuple_dst_port_value; u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; __le32 rss_key[T_ETH_RSS_KEY]; __le32 echo; @@ -4115,6 +4205,23 @@ enum eth_tpa_update_command { MAX_ETH_TPA_UPDATE_COMMAND }; +/* In case of LSO over IPv4 tunnel, whether to increment + * IP ID on external IP header or internal IP header + */ +enum eth_tunnel_lso_inc_ip_id { + EXT_HEADER, + INT_HEADER, + MAX_ETH_TUNNEL_LSO_INC_IP_ID +}; + +/* In case tunnel exist and L4 checksum offload, + * the pseudo checksum location, on packet or on BD. + */ +enum eth_tunnel_non_lso_pcsum_location { + PCSUM_ON_PKT, + PCSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION +}; /* * Tx regular BD structure @@ -4166,8 +4273,8 @@ struct eth_tx_start_bd { #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) #define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 -#define ETH_TX_START_BD_RESREVED (0x1<<7) -#define ETH_TX_START_BD_RESREVED_SHIFT 7 +#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) +#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7 }; /* @@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x { * Tx parsing BD structure for ETH E2 */ struct eth_tx_parse_bd_e2 { - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; + union eth_mac_addr_or_tunnel_data data; __le32 parsing_data; -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0 #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) @@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 { }; /* - * The last BD in the BD memory will hold a pointer to the next BD memory + * Tx 2nd parsing BD structure for ETH packet */ +struct eth_tx_parse_2nd_bd { + __le16 global_data; +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) +#define 
ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 + __le16 reserved1; + u8 tcp_flags; +#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 + u8 reserved2; + u8 tunnel_udp_hdr_start_w; + u8 fw_ip_hdr_to_payload_w; + __le16 fw_ip_csum_wo_len_flags_frag; + __le16 hw_ip_id; + __le32 tcp_send_seq; +}; + +/* The last BD in the BD memory will hold a pointer to the next BD memory */ struct eth_tx_next_bd { __le32 addr_lo; __le32 addr_hi; @@ -4252,6 +4397,7 @@ union eth_tx_bd_types { struct eth_tx_bd reg_bd; struct eth_tx_parse_bd_e1x parse_bd_e1x; struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_parse_2nd_bd parse_2nd_bd; struct eth_tx_next_bd next_bd; }; @@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id { RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, RAMROD_CMD_ID_COMMON_START_TRAFFIC, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, + RAMROD_CMD_ID_COMMON_SET_TIMESYNC, MAX_COMMON_SPQE_CMD_ID }; - /* * Per-protocol connection types */ @@ -4863,7 +5009,7 @@ struct vf_flr_event_data { */ struct malicious_vf_event_data { u8 vf_id; - u8 reserved0; + u8 err_id; u16 reserved1; u32 reserved2; u32 reserved3; @@ -4969,10 +5115,10 @@ enum event_ring_opcode { EVENT_RING_OPCODE_CLASSIFICATION_RULES, EVENT_RING_OPCODE_FILTERS_RULES, EVENT_RING_OPCODE_MULTICAST_RULES, + EVENT_RING_OPCODE_SET_TIMESYNC, MAX_EVENT_RING_OPCODE }; - /* * Modes for fairness algorithm */ @@ -5010,14 +5156,18 @@ struct flow_control_configuration { */ struct function_start_data { u8 function_mode; - u8 reserved; + u8 allow_npar_tx_switching; __le16 sd_vlan_tag; __le16 vif_id; u8 path_id; u8 network_cos_mode; + u8 dmae_cmd_id; + u8 gre_tunnel_mode; + u8 gre_tunnel_rss; + u8 nvgre_clss_en; + __le16 reserved1[2]; }; - struct function_update_data { u8 vif_id_change_flg; u8 afex_default_vlan_change_flg; @@ -5027,14 +5177,19 @@ struct function_update_data { __le16 afex_default_vlan; u8 allowed_priorities; u8 network_cos_mode; + u8 lb_mode_en_change_flg; u8 lb_mode_en; u8 tx_switch_suspend_change_flg; u8 tx_switch_suspend; u8 echo; - __le16 reserved1; + u8 reserved1; + u8 update_gre_cfg_flg; + u8 gre_tunnel_mode; + u8 gre_tunnel_rss; + u8 nvgre_clss_en; + u32 reserved3; }; - /* * FW version stored in the Xstorm RAM */ @@ -5061,6 +5216,22 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; +/* GRE RSS Mode */ +enum gre_rss_mode { + GRE_OUTER_HEADERS_RSS, + GRE_INNER_HEADERS_RSS, + NVGRE_KEY_ENTROPY_RSS, + MAX_GRE_RSS_MODE +}; + +/* GRE Tunnel Mode */ +enum gre_tunnel_type { + NO_GRE_TUNNEL, + 
NVGRE_TUNNEL, + L2GRE_TUNNEL, + IPGRE_TUNNEL, + MAX_GRE_TUNNEL_TYPE +}; /* * Dynamic Host-Coalescing - Driver(host) counters @@ -5224,6 +5395,26 @@ enum ip_ver { MAX_IP_VER }; +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id { + VF_PF_CHANNEL_NOT_READY, + ETH_ILLEGAL_BD_LENGTHS, + ETH_PACKET_TOO_SHORT, + ETH_PAYLOAD_TOO_BIG, + ETH_ILLEGAL_ETH_TYPE, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_TOO_MANY_BDS, + ETH_ZERO_HDR_NBDS, + ETH_START_BD_NOT_SET, + ETH_ILLEGAL_PARSE_NBDS, + ETH_IPV6_AND_CHECKSUM, + ETH_VLAN_FLG_INCORRECT, + ETH_ILLEGAL_LSO_MSS, + ETH_TUNNEL_NOT_SUPPORTED, + MAX_MALICIOUS_VF_ERROR_ID +}; /* * Multi-function modes @@ -5368,7 +5559,6 @@ struct protocol_common_spe { union protocol_common_specific_data data; }; - /* * The send queue element */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64a..6cc6c6374a92 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -27,6 +27,10 @@ #include "bnx2x.h" #include "bnx2x_cmn.h" +typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, + struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8); /********************************************************/ #define ETH_HLEN 14 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ @@ -152,6 +156,7 @@ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 + #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 #define SFP_EEPROM_COMP_CODE_ADDR 0x3 @@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params, int rc = 0; struct bnx2x *bp = params->bp; - if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { - DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); - return -EINVAL; - } - if (xfer_cnt > 16) { DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n", xfer_cnt); @@ -3629,6 +3629,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, * init configuration, and set/clear SGMII flag. Internal * phy init is done purely in phy_init stage. 
*/ +#define WC_TX_DRIVER(post2, idriver, ipre) \ + ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ + (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)) + +#define WC_TX_FIR(post, main, pre) \ + ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ + (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ + (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) + static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -3728,7 +3738,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { - u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; an_adv |= (1<<5); /* Enable CL37 1G Parallel Detect */ @@ -3753,20 +3763,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Set Transmit PMD settings */ lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x06, 0x09)); /* Configure the next lane if dual mode */ if (phy->flags & FLAGS_WC_DUAL_MODE) bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), - ((0x02 << - MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x06 << - MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x09 << - MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + WC_TX_DRIVER(0x02, 0x06, 0x09)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 0x03f0); @@ -3909,6 +3912,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 misc1_val, tap_val, tx_driver_val, lane, val; + u32 cfg_tap_val, tx_drv_brdct, tx_equal; + /* Hold rxSeqStart */ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); @@ -3952,23 +3957,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, if (is_xfi) { misc1_val |= 0x5; - tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); - + tap_val = WC_TX_FIR(0x08, 0x37, 0x00); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03); } else { + cfg_tap_val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port]. 
+ sfi_tap_values)); + + tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; + + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + misc1_val |= 0x9; - tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); - tx_driver_val = - ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + + /* TAP values are controlled by nvram, if value there isn't 0 */ + if (tx_equal) + tap_val = (u16)tx_equal; + else + tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); + + if (tx_drv_brdct) + tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct, + 0x06); + else + tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06); } bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); @@ -4105,15 +4120,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, /* Set Transmit PMD settings */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, - ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | - (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | - (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | - MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + (WC_TX_FIR(0x12, 0x2d, 0x00) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, - ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | - (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x02, 0x02)); } static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, @@ -4750,8 +4761,8 @@ void bnx2x_link_status_update(struct link_params *params, port_mb[port].link_status)); /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ - if (bp->link_params.loopback_mode != LOOPBACK_NONE && - bp->link_params.loopback_mode != LOOPBACK_EXT) + if (params->loopback_mode != LOOPBACK_NONE && + params->loopback_mode != LOOPBACK_EXT) vars->link_status |= LINK_STATUS_LINK_UP; if (bnx2x_eee_has_cap(params)) @@ -7758,7 +7769,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params, static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) { struct bnx2x *bp = params->bp; u16 val = 0; @@ -7771,7 +7783,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - (byte_cnt | 0xa000)); + (byte_cnt | (dev_addr << 8))); /* Set the read command address */ bnx2x_cl45_write(bp, phy, @@ -7845,6 +7857,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params, } static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, u8 *o_buf, u8 is_init) { @@ -7869,7 +7882,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, usleep_range(1000, 2000); bnx2x_warpcore_power_module(params, 1); } - rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, + rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt, data_array); } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@ -7885,7 +7898,8 @@ static int 
bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) { struct bnx2x *bp = params->bp; u16 val, i; @@ -7896,6 +7910,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + ((dev_addr << 8) | 1)); + /* Need to read from 1.8000 to clear it */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -7968,26 +7991,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } - int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf) { - int rc = -EOPNOTSUPP; + int rc = 0; + struct bnx2x *bp = params->bp; + u8 xfer_size; + u8 *user_data = o_buf; + read_sfp_module_eeprom_func_p read_func; + + if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { + DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr); + return -EINVAL; + } + switch (phy->type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; + read_func = bnx2x_8726_read_sfp_module_eeprom; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: - rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - break; + read_func = bnx2x_8727_read_sfp_module_eeprom; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: - rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf, 0); - break; + read_func = bnx2x_warpcore_read_sfp_module_eeprom; + break; + default: + return -EOPNOTSUPP; + } + + while (!rc && (byte_cnt > 0)) { + xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ? + SFP_EEPROM_PAGE_SIZE : byte_cnt; + rc = read_func(phy, params, dev_addr, addr, xfer_size, + user_data, 0); + byte_cnt -= xfer_size; + user_data += xfer_size; + addr += xfer_size; } return rc; } @@ -8004,6 +8045,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, /* First check for copper cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_CON_TYPE_ADDR, 2, (u8 *)val) != 0) { @@ -8021,6 +8063,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_FC_TX_TECH_ADDR, 1, &copper_module_type) != 0) { @@ -8049,20 +8092,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, break; } case SFP_EEPROM_CON_TYPE_VAL_LC: + case SFP_EEPROM_CON_TYPE_VAL_RJ45: check_limiting_mode = 1; if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | SFP_EEPROM_COMP_CODE_LR_MASK | SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { - DP(NETIF_MSG_LINK, "1G Optic module detected\n"); + DP(NETIF_MSG_LINK, "1G SFP module detected\n"); gport = params->port; phy->media_type = ETH_PHY_SFP_1G_FIBER; - phy->req_line_speed = SPEED_1000; - if (!CHIP_IS_E1x(bp)) - gport = BP_PATH(bp) + (params->port << 1); - netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps." 
- " Current SFP module in port %d is not" - " compliant with 10G Ethernet\n", - gport); + if (phy->req_line_speed != SPEED_1000) { + phy->req_line_speed = SPEED_1000; + if (!CHIP_IS_E1x(bp)) { + gport = BP_PATH(bp) + + (params->port << 1); + } + netdev_err(bp->dev, + "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", + gport); + } } else { int idx, cfg_idx = 0; DP(NETIF_MSG_LINK, "10G Optic module detected\n"); @@ -8101,6 +8148,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, u8 options[SFP_EEPROM_OPTIONS_SIZE]; if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_OPTIONS_ADDR, SFP_EEPROM_OPTIONS_SIZE, options) != 0) { @@ -8167,6 +8215,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, /* Format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_VENDOR_NAME_ADDR, SFP_EEPROM_VENDOR_NAME_SIZE, (u8 *)vendor_name)) @@ -8175,6 +8224,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; if (bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, SFP_EEPROM_PART_NO_ADDR, SFP_EEPROM_PART_NO_SIZE, (u8 *)vendor_pn)) @@ -8205,12 +8255,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, for (timeout = 0; timeout < 60; timeout++) { if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) - rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, - params, 1, - 1, &val, 1); + rc = bnx2x_warpcore_read_sfp_module_eeprom( + phy, params, I2C_DEV_ADDR_A0, 1, 1, &val, + 1); else - rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, - &val); + rc = bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, + 1, 1, &val); if (rc == 0) { DP(NETIF_MSG_LINK, "SFP+ module initialization took %d ms\n", @@ -8219,7 +8270,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, } usleep_range(5000, 10000); } - rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val); + rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, + 1, 1, &val); return rc; } @@ -8376,15 +8428,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); - - /* Set 2-wire transfer rate of SFP+ module EEPROM - * to 100Khz since some DACs(direct attached cables) do - * not work at 400Khz. - */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa001); break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", @@ -9528,8 +9571,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, } else { /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. 
*/ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - for (i = 0; i < ARRAY_SIZE(reg_set); - i++) + for (i = 0; i < ARRAY_SIZE(reg_set); i++) bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); @@ -10281,7 +10323,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) bnx2x_eee_an_resolve(phy, params, vars); } @@ -12242,7 +12285,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params, bnx2x_xgxs_deassert(params); - /* set bmac loopback */ + /* Set bmac loopback */ bnx2x_bmac_enable(params, vars, 1, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); @@ -12261,7 +12304,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params, vars->phy_flags = PHY_XGXS_FLAG; bnx2x_xgxs_deassert(params); - /* set bmac loopback */ + /* Set bmac loopback */ bnx2x_emac_enable(params, vars, 1); bnx2x_emac_program(params, vars); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); @@ -12521,6 +12564,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) params->req_line_speed[0], params->req_flow_ctrl[0]); DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", params->req_line_speed[1], params->req_flow_ctrl[1]); + DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv); vars->link_status = 0; vars->phy_link_up = 0; vars->link_up = 0; @@ -13446,8 +13490,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params, } /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery - * since some switches tend to reinit the AN process and clear the - * advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * Since some switches tend to reinit the AN process and clear the + * advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times */ if (vars->check_kr2_recovery_cnt > 0) { @@ -13465,8 +13509,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params, /* CL73 has not begun yet */ if (base_page == 0) { - if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) + if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No BP\n"); + } return; } @@ -13482,7 +13528,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { if (!not_kr2_device) { DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, - next_page); + next_page); bnx2x_kr2_recovery(params, vars, phy); } return; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 56c2aae4e2c8..4df45234fdc0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -41,6 +41,9 @@ #define SPEED_AUTO_NEG 0 #define SPEED_20000 20000 +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 + #define SFP_EEPROM_PAGE_SIZE 16 #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 #define SFP_EEPROM_VENDOR_NAME_SIZE 16 @@ -54,6 +57,15 @@ #define SFP_EEPROM_SERIAL_SIZE 16 #define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ #define SFP_EEPROM_DATE_SIZE 6 +#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c +#define SFP_EEPROM_DIAG_TYPE_SIZE 1 +#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2) +#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 + +#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e +#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f +
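/* A minimal stand-alone sketch (an assumed model, not driver code) of how
 * the SFF-8472 fields defined above drive the bnx2x_get_module_info() change
 * earlier in this patch: the compliance byte (0x5e) and the diagnostics-type
 * byte (0x5c) are read from the A0 page, and the 512-byte SFF-8472 layout is
 * only advertised when the A2 diagnostics page is actually reachable. */
#include <stdint.h>

#define DIAG_ADDR_CHANGE_REQ (1 << 2)	/* mirrors SFP_EEPROM_DIAG_ADDR_CHANGE_REQ */

/* Returns the eeprom_len to report: 256 bytes for SFF-8079 (A0 page only),
 * 512 bytes for SFF-8472 (A0 plus the A2 diagnostics page). */
static unsigned int sfp_module_eeprom_len(uint8_t sff8472_comp,
					  uint8_t diag_type)
{
	if (!sff8472_comp || (diag_type & DIAG_ADDR_CHANGE_REQ))
		return 256;	/* ETH_MODULE_SFF_8079_LEN */
	return 512;		/* ETH_MODULE_SFF_8472_LEN */
}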
#define PWR_FLT_ERR_MSG_LEN 250 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ @@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf); + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf); void bnx2x_hw_reset_phy(struct link_params *params); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..fdfe33bc097b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -75,8 +75,6 @@ #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) - /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) @@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); /* tx only connections collect statistics (on the same index as the - * parent connection). The statistics are zeroed when the parent - * connection is initialized. + * parent connection). The statistics are zeroed when the parent + * connection is initialized. */ __set_bit(BNX2X_Q_FLG_STATS, &flags); if (zero_stats) __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); + __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); #ifdef BNX2X_STOP_ON_ERROR __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); @@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) { struct eth_stats_info *ether_stat = &bp->slowpath->drv_info_to_mcp.ether_stat; + struct bnx2x_vlan_mac_obj *mac_obj = + &bp->sp_objs->mac_obj; + int i; strlcpy(ether_stat->version, DRV_MODULE_VERSION, ETH_STAT_INFO_VERSION_LEN); - bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); - + /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the + * mac_local field in ether_stat struct. The base address is offset by 2 + * bytes to account for the field being 8 bytes but a mac address is + * only 6 bytes. Likewise, the stride for the get_n_elements function is + * 2 bytes to pad from the 6 bytes of a mac to the 8 bytes + * allocated by the ether_stat struct, so the macs will land in their + * proper positions. 
+ */ + for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) + memset(ether_stat->mac_local + i, 0, + sizeof(ether_stat->mac_local[0])); + mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, MAC_PAD, + ETH_ALEN); ether_stat->mtu_size = bp->dev->mtu; - if (bp->dev->features & NETIF_F_RXCSUM) ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (bp->dev->features & NETIF_F_TSO) @@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) if (!CNIC_LOADED(bp)) return; - memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, - bp->fip_mac, ETH_ALEN); + memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); fcoe_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; @@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) if (!CNIC_LOADED(bp)) return; - memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, - bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); + memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, + ETH_ALEN); iscsi_stat->qos_priority = app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; @@ -6029,9 +6041,10 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) rmb(); bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - - if (IS_VF(bp)) + if (IS_VF(bp)) { + bnx2x_memset_stats(bp); return; + } /* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, @@ -9525,6 +9538,10 @@ sp_rtnl_not_reset: bnx2x_vfpf_storm_rx_mode(bp); } + if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, + &bp->sp_rtnl_state)) + bnx2x_pf_set_vfs_vlan(bp); + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@ -9532,8 +9549,10 @@ sp_rtnl_not_reset: /* enable SR-IOV if applicable */ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, - &bp->sp_rtnl_state)) + &bp->sp_rtnl_state)) { + bnx2x_disable_sriov(bp); bnx2x_enable_sriov(bp); + } } static void bnx2x_period_task(struct work_struct *work) @@ -9701,6 +9720,31 @@ static struct bnx2x_prev_path_list * return NULL; } +static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + tmp_list->aer = 1; + rc = 0; + } else { + BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", + BP_PATH(bp)); + } + + up(&bnx2x_prev_sem); + + return rc; +} + static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) { struct bnx2x_prev_path_list *tmp_list; @@ -9709,14 +9753,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) if (down_trylock(&bnx2x_prev_sem)) return false; - list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { - if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && - bp->pdev->bus->number == tmp_list->bus && - BP_PATH(bp) == tmp_list->path) { + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (tmp_list->aer) { + DP(NETIF_MSG_HW, "Path %d was marked by AER\n", + BP_PATH(bp)); + } else { rc = true; BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", BP_PATH(bp)); - break; } } @@ -9730,6 +9775,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) struct bnx2x_prev_path_list *tmp_list; int rc; + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + 
BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + /* Check whether the entry for this path already exists */ + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (!tmp_list->aer) { + BNX2X_ERR("Re-Marking the path.\n"); + } else { + DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", + BP_PATH(bp)); + tmp_list->aer = 0; + } + up(&bnx2x_prev_sem); + return 0; + } + up(&bnx2x_prev_sem); + + /* Create an entry for this path and add it */ tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); if (!tmp_list) { BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); @@ -9739,6 +9806,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) tmp_list->bus = bp->pdev->bus->number; tmp_list->slot = PCI_SLOT(bp->pdev->devfn); tmp_list->path = BP_PATH(bp); + tmp_list->aer = 0; tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; rc = down_interruptible(&bnx2x_prev_sem); @@ -9746,8 +9814,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) BNX2X_ERR("Received %d when tried to take lock\n", rc); kfree(tmp_list); } else { - BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", - BP_PATH(bp)); + DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", + BP_PATH(bp)); list_add(&tmp_list->list, &bnx2x_prev_list); up(&bnx2x_prev_sem); } @@ -9986,6 +10054,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) } do { + int aer = 0; /* Lock MCP using an unload request */ fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { @@ -9994,7 +10063,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp) break; } - if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", + rc); + } else { + /* If Path is marked by EEH, ignore unload status */ + aer = !!(bnx2x_prev_path_get_entry(bp) && + bnx2x_prev_path_get_entry(bp)->aer); + up(&bnx2x_prev_sem); + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { rc = bnx2x_prev_unload_common(bp); break; } @@ -10034,8 +10114,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) id = ((val & 0xffff) << 16); val = REG_RD(bp, MISC_REG_CHIP_REV); id |= ((val & 0xf) << 12); - val = REG_RD(bp, MISC_REG_CHIP_METAL); - id |= ((val & 0xff) << 4); + + /* Metal is read from PCI regs, but we can't access >=0x400 from + * the configuration space (so we need to reg_rd) + */ + val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); + id |= (((val >> 24) & 0xf) << 4); val = REG_RD(bp, MISC_REG_BOND_ID); id |= (val & 0xf); bp->common.chip_id = id; @@ -10812,14 +10896,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) } } - if (IS_MF_STORAGE_SD(bp)) - /* Zero primary MAC configuration */ - memset(bp->dev->dev_addr, 0, ETH_ALEN); - - if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp)) - /* use FIP MAC as primary MAC */ + /* If this is a storage-only interface, use SAN mac as + * primary MAC. Notice that for SD this is already the case, + * as the SAN mac was copied from the primary MAC. + */ + if (IS_MF_FCOE_AFEX(bp)) memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); - } else { val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 
iscsi_mac_upper); @@ -11056,6 +11138,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + bp->mf_config[vn] = 0; + break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; @@ -11402,26 +11487,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) * net_device service functions */ -static int bnx2x_open_epilog(struct bnx2x *bp) -{ - /* Enable sriov via delayed work. This must be done via delayed work - * because it causes the probe of the vf devices to be run, which invoke - * register_netdevice which must have rtnl lock taken. As we are holding - * the lock right now, that could only work if the probe would not take - * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm failes) it can't assume - * the lock is being held for it. Using delayed work here allows the - * probe code to simply take the lock (i.e. wait for it to be released - * if it is being held). - */ - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - - return 0; -} - /* called with rtnl_lock */ static int bnx2x_open(struct net_device *dev) { @@ -11791,6 +11856,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_setup_tc = bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_get_vf_config = bnx2x_get_vf_config, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, @@ -11953,7 +12020,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; - bnx2x_set_ethtool_ops(dev); + bnx2x_set_ethtool_ops(bp, dev); dev->priv_flags |= IFF_UNICAST_FLT; @@ -11961,6 +12028,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; + if (!CHIP_IS_E1x(bp)) { + dev->hw_features |= NETIF_F_GSO_GRE; + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_GRE; + } dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; @@ -12447,7 +12521,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, * l2 connections. */ if (IS_VF(bp)) { - bnx2x_vf_map_doorbells(bp); + bp->doorbells = bnx2x_vf_doorbells(bp); rc = bnx2x_vf_pci_alloc(bp); if (rc) goto init_one_exit; @@ -12475,13 +12549,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, goto init_one_exit; } - /* Enable SRIOV if capability found in configuration space. - * Once the generic SR-IOV framework makes it in from the - * pci tree this will be revised, to allow dynamic control - * over the number of VFs. Right now, change the num of vfs - * param below to enable SR-IOV. 
- */ - rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/); + /* Enable SRIOV if capability found in configuration space */ + rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); if (rc) goto init_one_exit; @@ -12493,16 +12562,6 @@ if (CHIP_IS_E1x(bp)) bp->flags |= NO_FCOE_FLAG; - /* disable FCOE for 57840 device, until FW supports it */ - switch (ent->driver_data) { - case BCM57840_O: - case BCM57840_4_10: - case BCM57840_2_20: - case BCM57840_MFO: - case BCM57840_MF: - bp->flags |= NO_FCOE_FLAG; - } - /* Set bp->num_queues for MSI-X mode*/ bnx2x_set_num_queues(bp); @@ -12636,9 +12695,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev) static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { - int i; - - bp->state = BNX2X_STATE_ERROR; + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bp->rx_mode = BNX2X_RX_MODE_NONE; @@ -12647,29 +12704,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) /* Stop Tx */ bnx2x_tx_disable(bp); - - bnx2x_netif_stop(bp, 0); /* Delete all NAPI objects */ bnx2x_del_all_napi(bp); if (CNIC_LOADED(bp)) bnx2x_del_all_napi_cnic(bp); + netdev_reset_tc(bp->dev); del_timer_sync(&bp->timer); + cancel_delayed_work(&bp->sp_task); + cancel_delayed_work(&bp->period_task); - bnx2x_stats_handle(bp, STATS_EVENT_STOP); - - /* Release IRQs */ - bnx2x_free_irq(bp); - - /* Free SKBs, SGEs, TPA pool and driver internals */ - bnx2x_free_skbs(bp); - - for_each_rx_queue(bp, i) - bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); - - bnx2x_free_mem(bp); + spin_lock_bh(&bp->stats_lock); + bp->stats_state = STATS_STATE_DISABLED; + spin_unlock_bh(&bp->stats_lock); - bp->state = BNX2X_STATE_CLOSED; + bnx2x_save_statistics(bp); netif_carrier_off(bp->dev); @@ -12705,6 +12754,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, rtnl_lock(); + BNX2X_ERR("IO error detected\n"); + netif_device_detach(dev); if (state == pci_channel_io_perm_failure) { @@ -12715,6 +12766,8 @@ if (netif_running(dev)) bnx2x_eeh_nic_unload(bp); + bnx2x_prev_path_mark_eeh(bp); + pci_disable_device(pdev); rtnl_unlock(); @@ -12733,9 +12786,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp = netdev_priv(dev); + int i; rtnl_lock(); - + BNX2X_ERR("IO slot reset initializing...\n"); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); @@ -12749,6 +12803,42 @@ if (netif_running(dev)) bnx2x_set_power_state(bp, PCI_D0); + if (netif_running(dev)) { + BNX2X_ERR("IO slot reset --> driver unload\n"); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 v; + + v = SHMEM2_RD(bp, + drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + bnx2x_drain_tx_queues(bp); + bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); + bnx2x_netif_stop(bp, 1); + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, true); + + bp->sp_state = 0; + bp->port.pmf = 0; + + bnx2x_prev_unload(bp); + + /* We should have reset the engine, so it's fair to + * assume the FW will no longer write to the bnx2x driver. + */ + bnx2x_squeeze_objects(bp); + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_fp_mem(bp); + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + } + rtnl_unlock(); return PCI_ERS_RESULT_RECOVERED;
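/* A compact model (assumptions made explicit, not driver code) of the AER
 * bookkeeping the EEH hunks above rely on: bnx2x_io_error_detected() marks
 * the engine's entry in the previous-unload path list via
 * bnx2x_prev_path_mark_eeh(), bnx2x_prev_unload() then treats a marked path
 * like FW_MSG_CODE_DRV_UNLOAD_COMMON, and the next successful mark_path()
 * clears the flag instead of reporting a duplicate entry. */
#include <stdbool.h>

struct prev_path_model {
	bool exists;	/* entry created when the first load completes */
	bool aer;	/* set by the EEH error handler */
};

static int model_mark_eeh(struct prev_path_model *p)
{
	if (!p->exists)
		return -1;	/* error arrived before the initial probe finished */
	p->aer = true;
	return 0;
}

static bool model_force_common_unload(const struct prev_path_model *p,
				      bool fw_said_common)
{
	/* If the path is marked by EEH, the MCP unload status is ignored. */
	return fw_said_common || (p->exists && p->aer);
}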
@@ -12775,6 +12865,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev) bnx2x_eeh_recover(bp); + bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + if (netif_running(dev)) bnx2x_nic_load(bp, LOAD_NORMAL); @@ -12797,6 +12890,9 @@ static struct pci_driver bnx2x_pci_driver = { .suspend = bnx2x_suspend, .resume = bnx2x_resume, .err_handler = &bnx2x_err_handler, +#ifdef CONFIG_BNX2X_SRIOV + .sriov_configure = bnx2x_sriov_configure, +#endif }; static int __init bnx2x_init(void) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 791eb2d53011..d22bc40091ec 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1491,10 +1491,6 @@ /* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 Port. */ #define MISC_REG_BOND_ID 0xa400 -/* [R 8] These bits indicate the metal revision of the chip. This value - starts at 0x00 for each all-layer tape-out and increments by one for each - tape-out. */ -#define MISC_REG_CHIP_METAL 0xa404 /* [R 16] These bits indicate the part number for the chip. */ #define MISC_REG_CHIP_NUM 0xa408 /* [R 4] These bits indicate the base revision of the chip. This value @@ -6331,6 +6327,8 @@ #define PCI_PM_DATA_B 0x414 #define PCI_ID_VAL1 0x434 #define PCI_ID_VAL2 0x438 +#define PCI_ID_VAL3 0x43c + #define GRC_CONFIG_REG_PF_INIT_VF 0x624 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf /* First VF_NUM for PF is encoded in this register. 
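/* Before the bnx2x_sp.c hunks below, a stand-alone sketch (assumed model) of
 * the generalized get_n_elements() copy loop they introduce: the callback now
 * takes a base pointer, a per-element stride and an element size, so the one
 * helper can fill plain arrays (stride 0) as well as the 8-byte drv_info
 * mac_local[] slots (base offset and stride both MAC_PAD == 2). */
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6
#define MAC_PAD  2	/* ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN */

static int copy_n_elements(const uint8_t (*elems)[ETH_ALEN], int avail, int n,
			   uint8_t *base, uint8_t stride, uint8_t size)
{
	uint8_t *next = base;
	int counter;

	for (counter = 0; counter < n && counter < avail; counter++) {
		memcpy(next, elems[counter], size);	/* raw element bytes */
		next += stride + size;			/* hop over the pad */
	}
	return counter * ETH_ALEN;	/* the driver reports MAC bytes copied */
}

/* bnx2x_drv_info_ether_stat() calls the real helper with
 * (mac_local + MAC_PAD, MAC_PAD, ETH_ALEN), so every 8-byte slot ends up as
 * two zero bytes of padding followed by a 6-byte MAC. */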
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 7306416bc90d..32a9609cc98b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -30,8 +30,6 @@ #define BNX2X_MAX_EMUL_MULTI 16 -#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) - /**** Exe Queue interfaces ****/ /** @@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) } static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf) + int n, u8 *base, u8 stride, u8 size) { struct bnx2x_vlan_mac_registry_elem *pos; - u8 *next = buf; + u8 *next = base; int counter = 0; /* traverse list */ list_for_each_entry(pos, &o->head, link) { if (counter < n) { - /* place leading zeroes in buffer */ - memset(next, 0, MAC_LEADING_ZERO_CNT); - - /* place mac after leading zeroes*/ - memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac, - ETH_ALEN); - - /* calculate address of next element and - * advance counter - */ + memcpy(next, &pos->u, size); counter++; - next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); + DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", + counter, next); + next += stride + size; - DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n", - counter, next, pos->u.mac.mac); } } return counter * ETH_ALEN; @@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp, /* Check if a requested MAC already exists */ list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) return -EEXIST; return 0; @@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, list_for_each_entry(pos, &o->head, link) if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) return -EEXIST; return 0; @@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem * DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); list_for_each_entry(pos, &o->head, link) - if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) return pos; return NULL; @@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem * list_for_each_entry(pos, &o->head, link) if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, - ETH_ALEN))) + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) return pos; return NULL; @@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, &rule_entry->mac.mac_mid, &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); /* MOVE: Add a rule that will add this MAC to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) { @@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, &rule_entry->mac.mac_mid, &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac. 
+ u.mac.is_inner_mac); } /* Set the ramrod data header */ @@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, &rule_entry->pair.mac_mid, &rule_entry->pair.mac_lsb, mac); - + rule_entry->pair.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac); /* MOVE: Add a rule that will add this MAC to the target Queue */ if (cmd == BNX2X_VLAN_MAC_MOVE) { rule_entry++; @@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, &rule_entry->pair.mac_mid, &rule_entry->pair.mac_lsb, mac); + rule_entry->pair.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac.u. + vlan_mac.is_inner_mac); } /* Set the ramrod data header */ @@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, return rc; } list_del(&exeq_pos->link); + bnx2x_exe_queue_free_elem(bp, exeq_pos); } } @@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, vlan_obj->check_move = bnx2x_check_move; vlan_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + vlan_obj->get_n_elements = bnx2x_get_n_elements; /* Exe Queue */ bnx2x_exe_queue_init(bp, @@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, tx_data->force_default_pri_flg = test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); + tx_data->tunnel_lso_inc_ip_id = + test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); + tx_data->tunnel_non_lso_pcsum_location = + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : + PCSUM_ON_BD; + tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tss_leading_client_id = params->tss_leading_cl_id; @@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill the ramrod data with provided parameters */ - rdata->function_mode = (u8)start_params->mf_mode; - rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); - rdata->path_id = BP_PATH(bp); - rdata->network_cos_mode = start_params->network_cos_mode; - - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + rdata->function_mode = (u8)start_params->mf_mode; + rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); + rdata->path_id = BP_PATH(bp); + rdata->network_cos_mode = start_params->network_cos_mode; + rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; + rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
*/ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index ff907609b9fc..43c00bc84a08 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -100,6 +100,7 @@ struct bnx2x_raw_obj { /************************* VLAN-MAC commands related parameters ***************/ struct bnx2x_mac_ramrod_data { u8 mac[ETH_ALEN]; + u8 is_inner_mac; }; struct bnx2x_vlan_ramrod_data { @@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data { struct bnx2x_vlan_mac_ramrod_data { u8 mac[ETH_ALEN]; + u8 is_inner_mac; u16 vlan; }; @@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj { * * @return number of copied bytes */ - int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, - int n, u8 *buf); + int (*get_n_elements)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int n, u8 *base, + u8 stride, u8 size); /** * Checks if ADD-ramrod with the given params may be performed. @@ -824,7 +827,9 @@ enum { BNX2X_Q_FLG_TX_SEC, BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_SILENT_VLAN_REM, - BNX2X_Q_FLG_FORCE_DEFAULT_PRI + BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_PCSUM_ON_PKT, + BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; /* Queue type options: queue type may be a compination of below. */ @@ -842,6 +847,7 @@ enum bnx2x_q_type { #define BNX2X_MULTI_TX_COS_E3B0 3 #define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ +#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) struct bnx2x_queue_init_params { struct { @@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; + + /* NVGRE classification enablement */ + u8 nvgre_clss_en; + + /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_mode; + + /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ + u8 gre_tunnel_rss; }; struct bnx2x_func_switch_update_params { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 6adfa2093581..2ce7c7471367 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -20,7 +20,9 @@ #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_cmn.h" +#include "bnx2x_sp.h" #include <linux/crc32.h> +#include <linux/if_vlan.h> /* General service functions */ static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, @@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp, rc = bnx2x_config_vlan_mac(bp, vlan_mac); if (rc >= 0) { cnt += pos->add ? 
1 : -1; - list_del(&pos->link); - list_add(&pos->link, &rollback_list); + list_move(&pos->link, &rollback_list); rc = 0; } else if (rc == -EEXIST) { rc = 0; } @@ -958,6 +959,12 @@ op_err: BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); op_done: case BNX2X_VFOP_QSETUP_DONE: + vf->cfg_flags |= VF_CFG_VLAN; + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); bnx2x_vfop_end(bp, vf, vfop); return; default: @@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) return bnx2x_is_pcie_pending(dev); unknown_dev: - BNX2X_ERR("Unknown device\n"); return false; } @@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* SRIOV can be enabled only with MSIX */ if (int_mode_param == BNX2X_INT_MODE_MSI || - int_mode_param == BNX2X_INT_MODE_INTX) + int_mode_param == BNX2X_INT_MODE_INTX) { BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); + return 0; + } err = -EIO; /* verify ari is enabled */ if (!bnx2x_ari_enabled(bp->pdev)) { - BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); - return err; + BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); + return 0; } /* verify igu is in normal mode */ if (CHIP_INT_MODE_IS_BC(bp)) { BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); - return err; + return 0; } /* allocate the vfs database */ @@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, if (iov->total == 0) goto failed; - /* calculate the actual number of VFs */ - iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param); + iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); + + DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", + num_vfs_param, iov->nr_virtfn); /* allocate the vf array */ bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * @@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) goto get_vf; case EVENT_RING_OPCODE_MALICIOUS_VF: abs_vfid = elem->message.data.malicious_vf_event.vf_id; - DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", - abs_vfid); + DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", + abs_vfid, elem->message.data.malicious_vf_event.err_id); goto get_vf; default: return 1; @@ -2436,8 +2446,8 @@ get_vf: /* Do nothing for now */ break; case EVENT_RING_OPCODE_MALICIOUS_VF: - DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", - vf->abs_vfid); + DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", + abs_vfid, elem->message.data.malicious_vf_event.err_id); /* Do nothing for now */ break; } @@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->op_current = CHANNEL_TLV_NONE; } -void bnx2x_enable_sriov(struct bnx2x *bp) +int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { - int rc = 0; - /* disbale sriov in case it is still enabled */ + struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + + /* HW channel is only operational when PF is up */ + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down"); + return -EINVAL; + } + + /* we are always bound by the total_vfs in the configuration space */ + if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + num_vfs_param = BNX2X_NR_VIRTFN(bp); + } + + bp->requested_nr_virtfn = num_vfs_param; + if (num_vfs_param == 0) { + pci_disable_sriov(dev); + return 0; + } else { + return bnx2x_enable_sriov(bp); + } +}
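The new bnx2x_sriov_configure() above is the driver's entry point for the standard PCI sysfs VF-provisioning interface. As a minimal sketch of how such a handler is typically wired up (.sriov_configure is the standard struct pci_driver hook; the other members shown are placeholders for the driver's existing ones):

static struct pci_driver bnx2x_pci_driver = {
	/* .name, .id_table, .probe, .remove, ... as before */
	/* called when the admin writes a VF count to
	 * /sys/bus/pci/devices/<BDF>/sriov_numvfs
	 */
	.sriov_configure = bnx2x_sriov_configure,
};

With that in place, writing 2 to sriov_numvfs reaches bnx2x_sriov_configure() with num_vfs_param == 2, while writing 0 takes the pci_disable_sriov() branch.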
+ +int bnx2x_enable_sriov(struct bnx2x *bp) +{ + int rc = 0, req_vfs = bp->requested_nr_virtfn; + + rc = pci_enable_sriov(bp->pdev, req_vfs); + if (rc) { + BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); + return rc; + } + DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); + return req_vfs; +} + +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) +{ + int vfidx; + struct pf_vf_bulletin_content *bulletin; + + DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); + for_each_vf(bp, vfidx) { + bulletin = BP_VF_BULLETIN(bp, vfidx); + if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) + bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); + } +} + +void bnx2x_disable_sriov(struct bnx2x *bp) +{ pci_disable_sriov(bp->pdev); - DP(BNX2X_MSG_IOV, "sriov disabled\n"); +} + +static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf *vf) +{ + if (!IS_SRIOV(bp)) { + BNX2X_ERR("vf ndo called even though sriov is disabled\n"); + return -EINVAL; + } + + if (vfidx >= BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", + vfidx, BNX2X_NR_VIRTFN(bp)); + return -EINVAL; + } + + if (!vf) { + BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", + vfidx); + return -EINVAL; + } - /* enable sriov */ - DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn)); - rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn)); + return 0; +} + +int bnx2x_get_vf_config(struct net_device *dev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf = BP_VF(bp, vfidx); + struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); + struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); + int rc; + + /* sanity */ + rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); if (rc) - BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); - else - DP(BNX2X_MSG_IOV, "sriov enabled\n"); + return rc; + if (!mac_obj || !vlan_obj || !bulletin) { + BNX2X_ERR("VF partially initialized\n"); + return -EINVAL; + } + + ivi->vf = vfidx; + ivi->qos = 0; + ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->spoofchk = 1; /* always enabled */ + if (vf->state == VF_ENABLED) { + /* mac and vlan are in vlan_mac objects */ + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, + 0, VLAN_HLEN); + } else { + /* mac */ + if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) + /* mac configured by ndo so it's in the bulletin board */ + memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); + else + /* function has not been loaded yet. Show mac as 0s */ + memset(&ivi->mac, 0, ETH_ALEN); + + /* vlan */ + if (bulletin->valid_bitmap & (1 << VLAN_VALID)) + /* vlan configured by ndo so it's in the bulletin board */ + memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); + else + /* function has not been loaded yet. Show vlans as 0s */ + memset(&ivi->vlan, 0, VLAN_HLEN); + } + + return 0; }
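bnx2x_get_vf_config() above is the first user of the widened get_n_elements() signature declared in bnx2x_sp.h earlier in this patch. As a rough illustration of the contract it assumes (a hypothetical helper sketching the semantics, not the driver's actual registry walk): copy up to n elements of size bytes each into base, stepping stride bytes between destination slots, and report the number of copied bytes as the kernel-doc in bnx2x_sp.h says.

/* hypothetical illustration of the base/stride/size contract */
static int copy_n_elements(const u8 *src, int count, int elem_sz,
			   int n, u8 *base, u8 stride, u8 size)
{
	int i;

	for (i = 0; i < count && i < n; i++)
		memcpy(base + i * stride, src + i * elem_sz, size);
	return i * size;
}

The call sites above use n == 1 with stride 0, i.e. "copy the single configured MAC/VLAN into the ifla_vf_info field", which is what ip -d link show then reports per VF.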
/* New mac for VF. Consider these cases: @@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp) * VF to configure any mac for itself except for this mac. In case of a race * where the VF fails to see the new post on its bulletin board before sending a * mac configuration request, the PF will simply fail the request and VF can try - * again after consulting its bulletin board + * again after consulting its bulletin board. */ -int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) +int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) { struct bnx2x *bp = netdev_priv(dev); - int rc, q_logical_state, vfidx = queue; + int rc, q_logical_state; struct bnx2x_virtf *vf = BP_VF(bp, vfidx); struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); - /* if SRIOV is disabled there is nothing to do (and somewhere, someone - * has erred). - */ - if (!IS_SRIOV(bp)) { - BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n"); - return -EINVAL; - } - + /* sanity */ + rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); + if (rc) + return rc; if (!is_valid_ether_addr(mac)) { BNX2X_ERR("mac address invalid\n"); return -EINVAL; @@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ - unsigned long flags = 0; + unsigned long ramrod_flags = 0; struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); /* must lock vfpf channel to protect against vf flows */ @@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) } /* configure the new mac to device */ - __set_bit(RAMROD_COMP_WAIT, &flags); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, - BNX2X_ETH_MAC, &flags); + BNX2X_ETH_MAC, &ramrod_flags); bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } - return rc; + return 0; +}
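bnx2x_set_vf_mac() above and bnx2x_set_vf_vlan() just below plug into the generic VF-management net_device_ops slots; a sketch of the expected wiring (the hook names are the standard kernel ones, the surrounding ops structure is assumed):

static const struct net_device_ops bnx2x_netdev_ops = {
	/* ... the driver's regular ndo entries ... */
	.ndo_set_vf_mac    = bnx2x_set_vf_mac,
	.ndo_set_vf_vlan   = bnx2x_set_vf_vlan,
	.ndo_get_vf_config = bnx2x_get_vf_config,
};

so that "ip link set <pf> vf 0 mac 02:00:00:00:00:01" and "ip link set <pf> vf 0 vlan 100" land in these handlers under rtnl.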
+ +int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc, q_logical_state; + struct bnx2x_virtf *vf = BP_VF(bp, vfidx); + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); + + /* sanity */ + rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); + if (rc) + return rc; + + if (vlan > 4095) { + BNX2X_ERR("illegal vlan value %d\n", vlan); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", + vfidx, vlan, 0); + + /* update PF's copy of the VF's bulletin. No point in posting the vlan + * to the VF since it doesn't have anything to do with it. But it is useful + * to store it here in case the VF is not up yet and we can only + * configure the vlan later when it does come up. + */ + bulletin->valid_bitmap |= 1 << VLAN_VALID; + bulletin->vlan = vlan; + + /* is vf initialized and queue set up? */ + q_logical_state = + bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + if (vf->state == VF_ENABLED && + q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { + /* configure the vlan in device on this vf's queue */ + unsigned long ramrod_flags = 0; + unsigned long vlan_mac_flags = 0; + struct bnx2x_vlan_mac_obj *vlan_obj = + &bnx2x_vfq(vf, 0, vlan_obj); + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_update_params *update_params; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + return -EINVAL; + } + + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). + */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure the new vlan to device */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + return -EINVAL; + } + + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + update_params->def_vlan = vlan; + } + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN\n"); + return rc; + } + + /* clear the flag indicating that this VF needs its vlan + * (will only be set if the HV configured the vlan before the VF was + * up and we were called because the VF came up later) + */ + vf->cfg_flags &= ~VF_CFG_VLAN; + + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + } + return 0; }
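The bulletin-board comment that follows describes a CRC-guarded publish/sample scheme: the PF recomputes the checksum after every post and the VF ignores a board whose CRC does not match. A sketch of the checksum helper implied by that layout (crc first, then length; BULLETIN_CRC_SEED and the crc32() helper from <linux/crc32.h> are the assumed ingredients):

static u32 bulletin_crc(struct pf_vf_bulletin_content *bulletin)
{
	/* checksum everything that follows the crc field itself */
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}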
/* crc is the first field in the bulletin board. compute the crc over the @@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); } + /* the vlan in the bulletin board is valid and is new */ + if (bulletin.valid_bitmap & 1 << VLAN_VALID) + memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); + /* copy new bulletin board to bp */ bp->old_bulletin = bulletin; return PFVF_BULLETIN_UPDATED; } -void bnx2x_vf_map_doorbells(struct bnx2x *bp) +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { /* vf doorbells are embedded within the regview */ - bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START; + return bp->regview + PXP_VF_ADDR_DB_START; } int bnx2x_vf_pci_alloc(struct bnx2x *bp) { + mutex_init(&bp->vf2pf_mutex); + /* allocate vf2pf mailbox for vf to pf channel */ BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); @@ -3196,3 +3444,26 @@ alloc_mem_err: sizeof(union pf_vf_bulletin)); return -ENOMEM; } + +int bnx2x_open_epilog(struct bnx2x *bp) +{ + /* Enable sriov via delayed work. This must be done via delayed work + * because it causes the probe of the vf devices to be run, which invokes + * register_netdevice which must have rtnl lock taken. As we are holding + * the lock right now, that could only work if the probe would not take + * the lock. However, as the probe of the vf may be called from other + * contexts as well (such as passthrough to vm fails) it can't assume + * the lock is being held for it. Using delayed work here allows the + * probe code to simply take the lock (i.e. wait for it to be released + * if it is being held). We only want to do this if the number of VFs + * was set before the PF driver was loaded. + */ + if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) { + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } + + return 0; +}
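bnx2x_open_epilog() only raises a flag; the half that runs with rtnl properly retaken lives in the driver's sp_rtnl delayed work. A minimal sketch of the consumer side (only the flag name comes from this patch; the handler shape is assumed):

/* inside the sp_rtnl work item, with rtnl_lock() held */
if (test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
		       &bp->sp_rtnl_state))
	bnx2x_enable_sriov(bp);

Deferring to a work item means the VF probes triggered by pci_enable_sriov() may take rtnl themselves without deadlocking against the .ndo_open caller.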
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index b4050173add9..d4b17b7a774e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -193,6 +193,7 @@ struct bnx2x_virtf { #define VF_CFG_TPA 0x0004 #define VF_CFG_INT_SIMD 0x0008 #define VF_CACHE_LINE 0x0010 +#define VF_CFG_VLAN 0x0020 u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ @@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, u16 length); void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, u16 type, u16 length); +void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv); void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); bool bnx2x_tlv_supported(u16 tlvtype); @@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, } enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); -void bnx2x_vf_map_doorbells(struct bnx2x *bp); +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); -void bnx2x_enable_sriov(struct bnx2x *bp); +int bnx2x_enable_sriov(struct bnx2x *bp); +void bnx2x_disable_sriov(struct bnx2x *bp); static inline int bnx2x_vf_headroom(struct bnx2x *bp) { return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; } +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); +int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param) {return 0; } static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} -static inline void bnx2x_enable_sriov(struct bnx2x *bp) {} +static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } +static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) {return 0; } static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } @@ -802,8 +809,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp return PFVF_BULLETIN_UNCHANGED; } -static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; } +static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) +{ + return NULL; +} + static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } +static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} +static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 4397f8b76f2e..2ca3d94fcec2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) } } +void bnx2x_memset_stats(struct bnx2x *bp) +{ + int i; + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; + + memset(&fp_stats->old_tclient, 0, + sizeof(fp_stats->old_tclient)); + memset(&fp_stats->old_uclient, 0, + sizeof(fp_stats->old_uclient)); + memset(&fp_stats->old_xclient, 0, + sizeof(fp_stats->old_xclient)); + if (bp->stats_init) { + memset(&fp_stats->eth_q_stats, 0, + sizeof(fp_stats->eth_q_stats)); + memset(&fp_stats->eth_q_stats_old, 0, + sizeof(fp_stats->eth_q_stats_old)); + } + } + + memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + + if (bp->stats_init) { + memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); + memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); + memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); + memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + memset(&bp->func_stats, 0, sizeof(bp->func_stats)); + } + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf && bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + /* mark the end of statistics initialization */ + bp->stats_init = false; +} + void bnx2x_stats_init(struct bnx2x *bp) { int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); - int i; bp->stats_pending = 0; bp->executer_idx = 0; @@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp) &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); } - /* function stats */ - for_each_queue(bp, i) { - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; - - memset(&fp_stats->old_tclient, 0, - sizeof(fp_stats->old_tclient)); - memset(&fp_stats->old_uclient, 0, - sizeof(fp_stats->old_uclient)); - memset(&fp_stats->old_xclient, 0, - sizeof(fp_stats->old_xclient)); - if (bp->stats_init) { - memset(&fp_stats->eth_q_stats, 0, - sizeof(fp_stats->eth_q_stats)); - memset(&fp_stats->eth_q_stats_old, 0, - sizeof(fp_stats->eth_q_stats_old)); - } - } - /* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp); - memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + /* Clean SP from previous statistics */ if (bp->stats_init) { - memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); - memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); - memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); - memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); - memset(&bp->func_stats, 0, sizeof(bp->func_stats)); - - /* Clean SP from previous statistics */ if (bp->func_stx) { memset(bnx2x_sp(bp, func_stats), 0, sizeof(struct host_func_stats)); @@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp) } } - bp->stats_state = STATS_STATE_DISABLED; - - if (bp->port.pmf && bp->port.port_stx) - bnx2x_port_stats_base_init(bp); - - /* mark the end of statistics initializiation */ - bp->stats_init = false; + bnx2x_memset_stats(bp); } void bnx2x_save_statistics(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 198f6f1c9ad5..d117f472816c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old { /* forward */ struct bnx2x; +void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp); - void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 531eebf40d60..90fbf9cc2c2c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, u16 type, u16 length) { + mutex_lock(&bp->vf2pf_mutex); + DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", type); @@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); } +/* releases the mailbox */ +void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv) +{ + DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n", + first_tlv->tl.type); + + mutex_unlock(&bp->vf2pf_mutex); +} + /* list the types and lengths of the tlvs on the buffer */ void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) { @@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* clear mailbox and prep first tlv */ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); - if (bnx2x_get_vf_id(bp, &vf_id)) - return -EAGAIN; + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } req->vfdev_info.vf_id = vf_id; req->vfdev_info.vf_os = 0; @@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* PF timeout */ if (rc) - return rc; + goto out; /* copy acquire response from buffer to bp */ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); @@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* PF reports error */ BNX2X_ERR("Failed to get the requested amount of resources: %d. 
Breaking...\n", bp->acquire_resp.hdr.status); - return -EAGAIN; + rc = -EAGAIN; + goto out; } } @@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->acquire_resp.resc.current_mac_addr, ETH_ALEN); - return 0; +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } int bnx2x_vfpf_release(struct bnx2x *bp) { struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - u32 rc = 0, vf_id; + u32 rc, vf_id; /* clear mailbox and prep first tlv */ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); - if (bnx2x_get_vf_id(bp, &vf_id)) - return -EAGAIN; + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } req->vf_id = vf_id; @@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp) if (rc) /* PF timeout */ - return rc; + goto out; + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { /* PF released us */ DP(BNX2X_MSG_SP, "vf released\n"); @@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp) /* PF reports error */ BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", resp->hdr.status); - return -EAGAIN; + rc = -EAGAIN; + goto out; } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); - return 0; + return rc; } /* Tell PF about SB addresses */ @@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp) rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); if (rc) - return rc; + goto out; if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("INIT VF failed: %d. Breaking...\n", resp->hdr.status); - return -EAGAIN; + rc = -EAGAIN; + goto out; } DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); - return 0; +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; } /* CLOSE VF - opposite to INIT_VF */ @@ -401,6 +427,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp) BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", resp->hdr.status); + bnx2x_vfpf_finalize(bp, &req->first_tlv); + free_irq: /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 0); @@ -435,7 +463,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) /* calculate queue flags */ flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; - flags |= IS_MF_SD(bp) ? 
VFPF_QUEUE_FLG_OV : 0; flags |= VFPF_QUEUE_FLG_VLAN; DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); @@ -486,8 +513,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", fp_idx, resp->hdr.status); - return -EINVAL; + rc = -EINVAL; } + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } @@ -515,17 +545,19 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) if (rc) { BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, rc); - return rc; + goto out; } /* PF failed the transaction */ if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, resp->hdr.status); - return -EINVAL; + rc = -EINVAL; } - return 0; +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; } /* request pf to add a mac for the vf */ @@ -533,7 +565,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp) { struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - int rc; + int rc = 0; /* clear mailbox and prep first tlv */ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, @@ -562,7 +594,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp) rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); if (rc) { BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); - return rc; + goto out; } /* failure may mean PF was configured with a new mac for us */ @@ -587,8 +619,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp) if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); - return -EINVAL; + rc = -EINVAL; } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); return 0; } @@ -643,14 +677,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev) rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); if (rc) { BNX2X_ERR("Sending a message failed: %d\n", rc); - return rc; + goto out; } if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("Set Rx mode/multicast failed: %d\n", resp->hdr.status); - return -EINVAL; + rc = -EINVAL; } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); return 0; } @@ -689,7 +725,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) break; default: BNX2X_ERR("BAD rx mode (%d)\n", mode); - return -EINVAL; + rc = -EINVAL; + goto out; } req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; @@ -708,8 +745,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) if (resp->hdr.status != PFVF_STATUS_SUCCESS) { BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); - return -EINVAL; + rc = -EINVAL; } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); return rc; } @@ -1004,7 +1043,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* convert MBX queue-flags to standard SP queue-flags */ -static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, +static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, unsigned long *sp_q_flags) { if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) @@ -1015,8 +1054,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); - if (mbx_q_flags & VFPF_QUEUE_FLG_OV) - __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN) __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_COS) @@ -1025,6 +1062,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, 
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + + /* outer vlan removal is set according to the PF's multi-function mode */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); } static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1075,11 +1116,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, init_p->tx.hc_rate = setup_q->txq.hc_rate; init_p->tx.sb_cq_index = setup_q->txq.sb_index; - bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, &init_p->tx.flags); /* tx setup - flags */ - bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, &setup_p->flags); /* tx setup - general, nothing */ @@ -1107,11 +1148,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, /* rx init */ init_p->rx.hc_rate = setup_q->rxq.hc_rate; init_p->rx.sb_cq_index = setup_q->rxq.sb_index; - bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, &init_p->rx.flags); /* rx setup - flags */ - bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, &setup_p->flags); /* rx setup - general */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index bfc80baec00d..41708faab575 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -328,9 +328,15 @@ struct pf_vf_bulletin_content { #define MAC_ADDR_VALID 0 /* alert the vf that a new mac address * is available for it */ +#define VLAN_VALID 1 /* when set, the vf should not access + * the vfpf channel + */ u8 mac[ETH_ALEN]; - u8 padding[2]; + u8 mac_padding[2]; + + u16 vlan; + u8 vlan_padding[6]; }; union pf_vf_bulletin { @@ -353,6 +359,7 @@ enum channel_tlvs { CHANNEL_TLV_LIST_END, CHANNEL_TLV_FLR, CHANNEL_TLV_PF_SET_MAC, + CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index e9b35da375cb..e80bfb60c3ef 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN); - if (sb_new == NULL) { - pr_info("%s: sk_buff allocation failed\n", - d->sbdma_eth->sbm_dev->name); + if (sb_new == NULL) return -ENOBUFS; - } sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); }
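On the VF side, the new bulletin fields above are consumed by sampling: the VF reads the board and honors whatever the PF marked valid instead of negotiating over the vfpf channel. A sketch of the vlan check, by analogy with the MAC_ADDR_VALID handling in bnx2x_sample_bulletin() earlier in this patch (not the literal driver code):

static bool vf_vlan_pinned(struct pf_vf_bulletin_content *bulletin)
{
	/* when set, the PF owns the vlan configuration and the VF
	 * must not configure vlans over the vfpf channel */
	return bulletin->valid_bitmap & (1 << VLAN_VALID);
}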
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 17a972734ba7..a4416b09f209 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) #define FIRMWARE_TG3 "tigon/tg3.bin" +#define FIRMWARE_TG357766 "tigon/tg357766.bin" #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" @@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) #define TX_CPU_SCRATCH_SIZE 0x04000 /* tp->lock is held. */ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) { int i; + const int iters = 10000; - BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + for (i = 0; i < iters; i++) { + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) + break; + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ +static int tg3_rxcpu_pause(struct tg3 *tp) +{ + int rc = tg3_pause_cpu(tp, RX_CPU_BASE); + + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + udelay(10); + + return rc; +} + +/* tp->lock is held. */ +static int tg3_txcpu_pause(struct tg3 *tp) +{ + return tg3_pause_cpu(tp, TX_CPU_BASE); +} + +/* tp->lock is held. */ +static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) +{ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); +} + +/* tp->lock is held. */ +static void tg3_rxcpu_resume(struct tg3 *tp) +{ + tg3_resume_cpu(tp, RX_CPU_BASE); +} + +/* tp->lock is held. */ +static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) +{ + int rc; + + BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); if (tg3_asic_rev(tp) == ASIC_REV_5906) { u32 val = tr32(GRC_VCPU_EXT_CTRL); @@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); return 0; } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } - - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); + if (cpu_base == RX_CPU_BASE) { + rc = tg3_rxcpu_pause(tp); } else { /* * There is only an Rx CPU for the 5750 derivative in the @@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) if (tg3_flag(tp, IS_SSB_CORE)) return 0; - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } + rc = tg3_txcpu_pause(tp); } - if (i >= 10000) { + if (rc) { netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); return -ENODEV; } @@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset) return 0; } -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const __be32 *fw_data; -}; +static int tg3_fw_data_len(struct tg3 *tp, + const struct tg3_firmware_hdr *fw_hdr) +{ + int fw_len; + + /* Non-fragmented firmware has one firmware header followed by a + * contiguous chunk of data to be written. The length field in that + * header is not the length of data to be written but the complete + * length of the bss. The data length is determined based on + * tp->fw->size minus headers. + * + * Fragmented firmware has a main header followed by multiple + * fragments. Each fragment is identical to non-fragmented firmware + * with a firmware header followed by a contiguous chunk of data. In + * the main header, the length field is unused and set to 0xffffffff. + * In each fragment header the length is the entire size of that + * fragment, i.e. fragment data + header length. The data length is + * therefore the length field in the header minus TG3_FW_HDR_LEN.
+ */ + if (tp->fw_len == 0xffffffff) + fw_len = be32_to_cpu(fw_hdr->len); + else + fw_len = tp->fw->size; + + return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); +} /* tp->lock is held. */ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base, int cpu_scratch_size, - struct fw_info *info) + const struct tg3_firmware_hdr *fw_hdr) { - int err, lock_err, i; + int err, i; void (*write_op)(struct tg3 *, u32, u32); + int total_len = tp->fw->size; if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { netdev_err(tp->dev, @@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, return -EINVAL; } - if (tg3_flag(tp, 5705_PLUS)) + if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) write_op = tg3_write_mem; else write_op = tg3_write_indirect_reg32; - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. - */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; + if (tg3_asic_rev(tp) != ASIC_REV_57766) { + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + int lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - be32_to_cpu(info->fw_data[i])); + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, + tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); + } else { + /* Subtract additional main header for fragmented firmware and + * advance to the first fragment + */ + total_len -= TG3_FW_HDR_LEN; + fw_hdr++; + } + + do { + u32 *fw_data = (u32 *)(fw_hdr + 1); + for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) + write_op(tp, cpu_scratch_base + + (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + + (i * sizeof(u32)), + be32_to_cpu(fw_data[i])); + + total_len -= be32_to_cpu(fw_hdr->len); + + /* Advance to next fragment */ + fw_hdr = (struct tg3_firmware_hdr *) + ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); + } while (total_len > 0); err = 0; @@ -3552,13 +3627,33 @@ out: } /* tp->lock is held. */ +static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) +{ + int i; + const int iters = 5; + + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, pc); + + for (i = 0; i < iters; i++) { + if (tr32(cpu_base + CPU_PC) == pc) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, pc); + udelay(1000); + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) { - struct fw_info info; - const __be32 *fw_data; - int err, i; + const struct tg3_firmware_hdr *fw_hdr; + int err; - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by start address and length. We are setting complete length. 
@@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) Remainder is the blob to be loaded contiguously from start address. */ - info.fw_base = be32_to_cpu(fw_data[1]); - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); + fw_hdr); if (err) return err; err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); + fw_hdr); if (err) return err; /* Now startup only the RX cpu. */ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) - break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { + err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + tr32(RX_CPU_BASE + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); return -ENODEV; } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + tg3_rxcpu_resume(tp); + + return 0; +} + +static int tg3_validate_rxcpu_state(struct tg3 *tp) +{ + const int iters = 1000; + int i; + u32 val; + + /* Wait for boot code to complete initialization and enter service + * loop. It is then safe to download service patches + */ + for (i = 0; i < iters; i++) { + if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) + break; + + udelay(10); + } + + if (i == iters) { + netdev_err(tp->dev, "Boot code not ready for service patches\n"); + return -EBUSY; + } + + val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); + if (val & 0xff) { + netdev_warn(tp->dev, + "Other patches exist. Not downloading EEE patch\n"); + return -EEXIST; + } return 0; } /* tp->lock is held. */ +static void tg3_load_57766_firmware(struct tg3 *tp) +{ + struct tg3_firmware_hdr *fw_hdr; + + if (!tg3_flag(tp, NO_NVRAM)) + return; + + if (tg3_validate_rxcpu_state(tp)) + return; + + if (!tp->fw) + return; + + /* This firmware blob has a different format than older firmware + * releases as given below. The main difference is we have fragmented + * data to be written to non-contiguous locations. + * + * In the beginning we have a firmware header identical to other + * firmware which consists of version, base addr and length. The length + * here is unused and set to 0xffffffff. + * + * This is followed by a series of firmware fragments which are + * individually identical to previous firmware. i.e. they have the + * firmware header and followed by data for that fragment. The version + * field of the individual fragment header is unused. + */ + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) + return; + + if (tg3_rxcpu_pause(tp)) + return; + + /* tg3_load_firmware_cpu() will always succeed for the 57766 */ + tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); + + tg3_rxcpu_resume(tp); +} + +/* tp->lock is held. 
*/ static int tg3_load_tso_firmware(struct tg3 *tp) { - struct fw_info info; - const __be32 *fw_data; + const struct tg3_firmware_hdr *fw_hdr; unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; + int err; - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) + if (!tg3_flag(tp, FW_TSO)) return 0; - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by start address and length. We are setting complete length. @@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp) Remainder is the blob to be loaded contiguously from start address. */ - info.fw_base = be32_to_cpu(fw_data[1]); cpu_scratch_size = tp->fw_len; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; if (tg3_asic_rev(tp) == ASIC_REV_5705) { cpu_base = RX_CPU_BASE; @@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp) err = tg3_load_firmware_cpu(tp, cpu_base, cpu_scratch_base, cpu_scratch_size, - &info); + fw_hdr); if (err) return err; /* Now startup the cpu. */ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); - - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) - break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { + err = tg3_pause_cpu_and_set_pc(tp, cpu_base, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); + __func__, tr32(cpu_base + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); return -ENODEV; } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); + + tg3_resume_cpu(tp, cpu_base); return 0; } @@ -8039,11 +8180,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp) tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, TG3_RX_RCB_RING_BYTES(tp), &tnapi->rx_rcb_mapping, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!tnapi->rx_rcb) goto err_out; - - memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); } return 0; @@ -8093,12 +8232,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), &tp->stats_mapping, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!tp->hw_stats) goto err_out; - memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); - for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; struct tg3_hw_status *sblk; @@ -8106,11 +8243,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, &tnapi->status_mapping, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!tnapi->hw_status) goto err_out; - memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); sblk = tnapi->hw_status; if (tg3_flag(tp, ENABLE_RSS)) { @@ -9781,6 +9917,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) return err; } + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + /* Ignore any errors for the firmware download. 
If download + * fails, the device will operate with EEE disabled + */ + tg3_load_57766_firmware(tp); + } + if (tg3_flag(tp, TSO_CAPABLE)) { err = tg3_load_tso_firmware(tp); if (err) @@ -10570,7 +10713,7 @@ static int tg3_test_msi(struct tg3 *tp) static int tg3_request_firmware(struct tg3 *tp) { - const __be32 *fw_data; + const struct tg3_firmware_hdr *fw_hdr; if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", @@ -10578,15 +10721,15 @@ static int tg3_request_firmware(struct tg3 *tp) return -ENOENT; } - fw_data = (void *)tp->fw->data; + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; /* Firmware blob starts with version numbers, followed by * start address and _full_ length including BSS sections * (which must be longer than the actual data, of course */ - tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ - if (tp->fw_len < (tp->fw->size - 12)) { + tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ + if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { netdev_err(tp->dev, "bogus length %d in \"%s\"\n", tp->fw_len, tp->fw_needed); release_firmware(tp->fw); @@ -10885,7 +11028,15 @@ static int tg3_open(struct net_device *dev) if (tp->fw_needed) { err = tg3_request_firmware(tp); - if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + if (err) { + netdev_warn(tp->dev, "EEE capability disabled\n"); + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "EEE capability restored\n"); + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + } + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { if (err) return err; } else if (err) { @@ -14515,6 +14666,7 @@ static int tg3_phy_probe(struct tg3 *tp) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && (tg3_asic_rev(tp) == ASIC_REV_5719 || tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_57766 || tg3_asic_rev(tp) == ASIC_REV_5762 || (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || @@ -15300,7 +15452,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && tg3_asic_rev(tp) != ASIC_REV_5701 && tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { - tg3_flag_set(tp, TSO_BUG); + tg3_flag_set(tp, FW_TSO); + tg3_flag_set(tp, TSO_BUG); if (tg3_asic_rev(tp) == ASIC_REV_5705) tp->fw_needed = FIRMWARE_TG3TSO5; else @@ -15311,7 +15464,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || - tp->fw_needed) { + tg3_flag(tp, FW_TSO)) { /* For firmware TSO, assume ASF is disabled. * We'll disable TSO later if we discover ASF * is enabled in tg3_get_eeprom_hw_cfg(). 
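Taking the helpers above together (tg3_fw_data_len(), the fragment loop in tg3_load_firmware_cpu() and the header parsing in tg3_request_firmware()), the fragmented 57766 image is walked header by header. A condensed sketch of that traversal, with load_words() standing in as a hypothetical placeholder for the scratch-memory writes the real code issues through write_op():

const struct tg3_firmware_hdr *hdr =
	(const struct tg3_firmware_hdr *)tp->fw->data;
int total = tp->fw->size - TG3_FW_HDR_LEN;	/* skip the main header */

hdr++;						/* first fragment */
do {
	const __be32 *data = (const __be32 *)(hdr + 1);

	/* a fragment's payload is its len minus its own header */
	load_words(data, tg3_fw_data_len(tp, hdr),
		   be32_to_cpu(hdr->base_addr));	/* hypothetical */
	total -= be32_to_cpu(hdr->len);
	hdr = (const void *)hdr + be32_to_cpu(hdr->len);
} while (total > 0);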
@@ -15326,6 +15479,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) tp->fw_needed = FIRMWARE_TG3; + if (tg3_asic_rev(tp) == ASIC_REV_57766) + tp->fw_needed = FIRMWARE_TG357766; + tp->irq_max = 1; if (tg3_flag(tp, 5750_PLUS)) { @@ -15598,7 +15754,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) */ tg3_get_eeprom_hw_cfg(tp); - if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { + if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; @@ -15786,6 +15942,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) udelay(50); tg3_nvram_init(tp); + /* If the device has an NVRAM, no need to load patch firmware */ + if (tg3_asic_rev(tp) == ASIC_REV_57766 && + !tg3_flag(tp, NO_NVRAM)) + tp->fw_needed = NULL; + grc_misc_cfg = tr32(GRC_MISC_CFG); grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 8d7d4c2ab5d6..1cdc1b641c77 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2222,6 +2222,12 @@ #define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 #define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 +#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000 +#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000 +#define TG3_57766_FW_BASE_ADDR 0x00030000 +#define TG3_57766_FW_HANDSHAKE 0x0003fccc +#define TG3_SBROM_IN_SERVICE_LOOP 0x51 + #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 #define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 @@ -3009,17 +3015,18 @@ enum TG3_FLAGS { TG3_FLAG_JUMBO_CAPABLE, TG3_FLAG_CHIP_RESETTING, TG3_FLAG_INIT_COMPLETE, - TG3_FLAG_TSO_BUG, TG3_FLAG_MAX_RXPEND_64, - TG3_FLAG_TSO_CAPABLE, TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ TG3_FLAG_ASF_NEW_HANDSHAKE, TG3_FLAG_HW_AUTONEG, TG3_FLAG_IS_NIC, TG3_FLAG_FLASH, + TG3_FLAG_FW_TSO, TG3_FLAG_HW_TSO_1, TG3_FLAG_HW_TSO_2, TG3_FLAG_HW_TSO_3, + TG3_FLAG_TSO_CAPABLE, + TG3_FLAG_TSO_BUG, TG3_FLAG_ICH_WORKAROUND, TG3_FLAG_1SHOT_MSI, TG3_FLAG_NO_FWARE_REPORTED, @@ -3064,6 +3071,13 @@ enum TG3_FLAGS { TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ }; +struct tg3_firmware_hdr { + __be32 version; /* unused for fragments */ + __be32 base_addr; + __be32 len; +}; +#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr)) + struct tg3 { /* begin "general, frequently-used members" cacheline section */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 3227fdde521b..f2b73ffa9122 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); -static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, +static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, u32 boot_param); static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7cce42dc2f20..d588f842d557 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ 
b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad, mem_info->mdl[i].len = mem_info->len; mem_info->mdl[i].kva = dma_alloc_coherent(&bnad->pcidev->dev, - mem_info->len, &dma_pa, - GFP_KERNEL); - + mem_info->len, &dma_pa, + GFP_KERNEL); if (mem_info->mdl[i].kva == NULL) goto err_return; diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 3becdb2deb46..c6e40d65a3df 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev) int i; lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - &lp->rx_ring_dma, GFP_KERNEL); - if (!lp->rx_ring) { - netdev_err(dev, "unable to alloc rx ring DMA buffer\n"); + (MAX_RX_DESCR * + sizeof(struct macb_dma_desc)), + &lp->rx_ring_dma, GFP_KERNEL); + if (!lp->rx_ring) return -ENOMEM; - } lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, - MAX_RX_DESCR * MAX_RBUFF_SZ, - &lp->rx_buffers_dma, GFP_KERNEL); + MAX_RX_DESCR * MAX_RBUFF_SZ, + &lp->rx_buffers_dma, GFP_KERNEL); if (!lp->rx_buffers) { - netdev_err(dev, "unable to alloc rx data DMA buffer\n"); - dma_free_coherent(&lp->pdev->dev, - MAX_RX_DESCR * sizeof(struct macb_dma_desc), - lp->rx_ring, lp->rx_ring_dma); + MAX_RX_DESCR * sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); lp->rx_ring = NULL; return -ENOMEM; } @@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev) netif_rx(skb); } else { lp->stats.rx_dropped++; - netdev_notice(dev, "Memory squeeze, dropping packet.\n"); } if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) @@ -519,18 +515,7 @@ static struct platform_driver at91ether_driver = { }, }; -static int __init at91ether_init(void) -{ - return platform_driver_probe(&at91ether_driver, at91ether_probe); -} - -static void __exit at91ether_exit(void) -{ - platform_driver_unregister(&at91ether_driver); -} - -module_init(at91ether_init) -module_exit(at91ether_exit) +module_platform_driver_probe(at91ether_driver, at91ether_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 79039439bfdc..ed2cb130f988 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp) status = macb_readl(bp, TSR); macb_writel(bp, TSR, status); + macb_writel(bp, ISR, MACB_BIT(TCOMP)); + netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", (unsigned long)status); @@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * now. 
*/ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); + macb_writel(bp, ISR, MACB_BIT(RCOMP)); if (napi_schedule_prep(&bp->napi)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); @@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp) dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); dmacfg |= GEM_BF(FBLDO, 16); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); + dmacfg &= ~GEM_BIT(ENDIA); gem_writel(bp, DMACFG, dmacfg); } } @@ -1557,14 +1561,14 @@ static int __init macb_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get macb_clk\n"); goto err_out_free_dev; } - clk_enable(bp->pclk); + clk_prepare_enable(bp->pclk); bp->hclk = clk_get(&pdev->dev, "hclk"); if (IS_ERR(bp->hclk)) { dev_err(&pdev->dev, "failed to get hclk\n"); goto err_out_put_pclk; } - clk_enable(bp->hclk); + clk_prepare_enable(bp->hclk); bp->regs = ioremap(regs->start, resource_size(regs)); if (!bp->regs) { @@ -1654,9 +1658,9 @@ err_out_free_irq: err_out_iounmap: iounmap(bp->regs); err_out_disable_clocks: - clk_disable(bp->hclk); + clk_disable_unprepare(bp->hclk); clk_put(bp->hclk); - clk_disable(bp->pclk); + clk_disable_unprepare(bp->pclk); err_out_put_pclk: clk_put(bp->pclk); err_out_free_dev: @@ -1683,9 +1687,9 @@ static int __exit macb_remove(struct platform_device *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(bp->regs); - clk_disable(bp->hclk); + clk_disable_unprepare(bp->hclk); clk_put(bp->hclk); - clk_disable(bp->pclk); + clk_disable_unprepare(bp->pclk); clk_put(bp->pclk); free_netdev(dev); platform_set_drvdata(pdev, NULL); @@ -1703,8 +1707,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state) netif_carrier_off(netdev); netif_device_detach(netdev); - clk_disable(bp->hclk); - clk_disable(bp->pclk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); return 0; } @@ -1714,8 +1718,8 @@ static int macb_resume(struct platform_device *pdev) struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); - clk_enable(bp->pclk); - clk_enable(bp->hclk); + clk_prepare_enable(bp->pclk); + clk_prepare_enable(bp->hclk); netif_device_attach(netdev); @@ -1737,18 +1741,7 @@ static struct platform_driver macb_driver = { }, }; -static int __init macb_init(void) -{ - return platform_driver_probe(&macb_driver, macb_probe); -} - -static void __exit macb_exit(void) -{ - platform_driver_unregister(&macb_driver); -} - -module_init(macb_init); -module_exit(macb_exit); +module_platform_driver_probe(macb_driver, macb_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 570908b93578..993d70380688 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -173,6 +173,8 @@ /* Bitfields in DMACFG. 
*/ #define GEM_FBLDO_OFFSET 0 #define GEM_FBLDO_SIZE 5 +#define GEM_ENDIA_OFFSET 7 +#define GEM_ENDIA_SIZE 1 #define GEM_RXBMS_OFFSET 8 #define GEM_RXBMS_SIZE 2 #define GEM_TXPBMS_OFFSET 10 diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 482976925154..55fe8c9f0484 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q) struct sk_buff *skb; dma_addr_t mapping; - skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); + skb = dev_alloc_skb(q->rx_buffer_size); if (!skb) break; @@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev, const struct freelQ_ce *ce = &fl->centries[fl->cidx]; if (len < copybreak) { - skb = alloc_skb(len + 2, GFP_ATOMIC); + skb = netdev_alloc_skb_ip_align(NULL, len); if (!skb) goto use_orig_buf; - skb_reserve(skb, 2); /* align IP header */ skb_put(skb, len); pci_dma_sync_single_for_cpu(pdev, dma_unmap_addr(ce, dma_addr), diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6db997c78a5f..681804b30a3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -54,6 +54,10 @@ #define FW_VERSION_MINOR 1 #define FW_VERSION_MICRO 0 +#define FW_VERSION_MAJOR_T5 0 +#define FW_VERSION_MINOR_T5 0 +#define FW_VERSION_MICRO_T5 0 + #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) enum { @@ -66,7 +70,9 @@ enum { enum { MEM_EDC0, MEM_EDC1, - MEM_MC + MEM_MC, + MEM_MC0 = MEM_MC, + MEM_MC1 }; enum { @@ -74,8 +80,10 @@ enum { MEMWIN0_BASE = 0x1b800, MEMWIN1_APERTURE = 32768, MEMWIN1_BASE = 0x28000, + MEMWIN1_BASE_T5 = 0x52000, MEMWIN2_APERTURE = 65536, MEMWIN2_BASE = 0x30000, + MEMWIN2_BASE_T5 = 0x54000, }; enum dev_master { @@ -431,6 +439,7 @@ struct sge_txq { spinlock_t db_lock; int db_disabled; unsigned short db_pidx; + u64 udb; }; struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ @@ -504,13 +513,44 @@ struct sge { struct l2t_data; +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A3, + + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_FIRST_REV = T5_A1, + T5_LAST_REV = T5_A1, +}; + +#ifdef CONFIG_PCI_IOV + +/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial + * Configuration initialization for T5 only has SR-IOV functionality enabled + * on PF0-3 in order to simplify everything. 
+ */ +#define NUM_OF_PF_WITH_SRIOV 4 + +#endif + struct adapter { void __iomem *regs; + void __iomem *bar2; struct pci_dev *pdev; struct device *pdev_dev; unsigned int mbox; unsigned int fn; unsigned int flags; + enum chip_type chip; int msg_enable; @@ -673,6 +713,16 @@ enum { VLAN_REWRITE }; +static inline int is_t5(enum chip_type chip) +{ + return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV); +} + +static inline int is_t4(enum chip_type chip) +{ + return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); +} + static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) { return readl(adap->regs + reg_addr); @@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, int start, int n, const u16 *rspq, unsigned int nrspq); int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, unsigned int flags); -int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); +int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e707e31abd81..e76cf035100b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -68,8 +68,8 @@ #include "t4fw_api.h" #include "l2t.h" -#define DRV_VERSION "1.3.0-ko" -#define DRV_DESC "Chelsio T4 Network Driver" +#define DRV_VERSION "2.0.0-ko" +#define DRV_DESC "Chelsio T4/T5 Network Driver" /* * Max interrupt hold-off timer value in us. Queues fall back to this value @@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { CH_DEVICE(0x440a, 4), CH_DEVICE(0x440d, 4), CH_DEVICE(0x440e, 4), + CH_DEVICE(0x5001, 5), + CH_DEVICE(0x5002, 5), + CH_DEVICE(0x5003, 5), + CH_DEVICE(0x5004, 5), + CH_DEVICE(0x5005, 5), + CH_DEVICE(0x5006, 5), + CH_DEVICE(0x5007, 5), + CH_DEVICE(0x5008, 5), + CH_DEVICE(0x5009, 5), + CH_DEVICE(0x500A, 5), + CH_DEVICE(0x500B, 5), + CH_DEVICE(0x500C, 5), + CH_DEVICE(0x500D, 5), + CH_DEVICE(0x500E, 5), + CH_DEVICE(0x500F, 5), + CH_DEVICE(0x5010, 5), + CH_DEVICE(0x5011, 5), + CH_DEVICE(0x5012, 5), + CH_DEVICE(0x5013, 5), + CH_DEVICE(0x5401, 5), + CH_DEVICE(0x5402, 5), + CH_DEVICE(0x5403, 5), + CH_DEVICE(0x5404, 5), + CH_DEVICE(0x5405, 5), + CH_DEVICE(0x5406, 5), + CH_DEVICE(0x5407, 5), + CH_DEVICE(0x5408, 5), + CH_DEVICE(0x5409, 5), + CH_DEVICE(0x540A, 5), + CH_DEVICE(0x540B, 5), + CH_DEVICE(0x540C, 5), + CH_DEVICE(0x540D, 5), + CH_DEVICE(0x540E, 5), + CH_DEVICE(0x540F, 5), + CH_DEVICE(0x5410, 5), + CH_DEVICE(0x5411, 5), + CH_DEVICE(0x5412, 5), + CH_DEVICE(0x5413, 5), { 0, } }; #define FW_FNAME "cxgb4/t4fw.bin" +#define FW5_FNAME "cxgb4/t5fw.bin" #define FW_CFNAME "cxgb4/t4-config.txt" +#define FW5_CFNAME "cxgb4/t5-config.txt" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); @@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW_FNAME); +MODULE_FIRMWARE(FW5_FNAME); /* * Normally we're willing to become the firmware's Master PF but will be happy @@ -319,7 +360,10 @@ static bool vf_acls; module_param(vf_acls, bool, 0644); MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); -static unsigned int num_vf[4]; +/* Configure the number of PCI-E Virtual Function which are to be instantiated + * on SR-IOV Capable Physical Functions. 
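The CHELSIO_CHIP_CODE() encoding introduced in cxgb4.h above packs the chip family into the upper nibble and the silicon revision into the lower nibble of a single byte; the is_t4()/is_t5() range checks only work because of that ordering. A standalone illustration (the three macros are copied from the hunk, the main() scaffolding exists only for demonstration):

#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

int main(void)
{
	int t5_a1 = CHELSIO_CHIP_CODE(0x5, 0);	/* T5_A1 */
	int t4_a3 = CHELSIO_CHIP_CODE(0x4, 2);	/* T4_A3 */

	printf("T5_A1 = 0x%02x (version %d, release %d)\n", t5_a1,
	       CHELSIO_CHIP_VERSION(t5_a1), CHELSIO_CHIP_RELEASE(t5_a1));
	printf("T4_A3 = 0x%02x (version %d, release %d)\n", t4_a3,
	       CHELSIO_CHIP_VERSION(t4_a3), CHELSIO_CHIP_RELEASE(t4_a3));
	return 0;
}

This prints 0x50 and 0x42 respectively, so every T5 code compares greater than every T4 code, which is what the range checks exploit.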
+ */ +static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; module_param_array(num_vf, uint, NULL, 0644); MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); @@ -1002,21 +1046,36 @@ freeout: t4_free_sge_resources(adap); static int upgrade_fw(struct adapter *adap) { int ret; - u32 vers; + u32 vers, exp_major; const struct fw_hdr *hdr; const struct firmware *fw; struct device *dev = adap->pdev_dev; + char *fw_file_name; - ret = request_firmware(&fw, FW_FNAME, dev); + switch (CHELSIO_CHIP_VERSION(adap->chip)) { + case CHELSIO_T4: + fw_file_name = FW_FNAME; + exp_major = FW_VERSION_MAJOR; + break; + case CHELSIO_T5: + fw_file_name = FW5_FNAME; + exp_major = FW_VERSION_MAJOR_T5; + break; + default: + dev_err(dev, "Unsupported chip type, %x\n", adap->chip); + return -EINVAL; + } + + ret = request_firmware(&fw, fw_file_name, dev); if (ret < 0) { - dev_err(dev, "unable to load firmware image " FW_FNAME - ", error %d\n", ret); + dev_err(dev, "unable to load firmware image %s, error %d\n", + fw_file_name, ret); return ret; } hdr = (const struct fw_hdr *)fw->data; vers = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { + if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) { ret = -EINVAL; /* wrong major version, won't do */ goto out; } @@ -1024,18 +1083,15 @@ static int upgrade_fw(struct adapter *adap) /* * If the flash FW is unusable or we found something newer, load it. */ - if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || + if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major || vers > adap->params.fw_vers) { dev_info(dev, "upgrading firmware ...\n"); ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, /*force=*/false); if (!ret) - dev_info(dev, "firmware successfully upgraded to " - FW_FNAME " (%d.%d.%d.%d)\n", - FW_HDR_FW_VER_MAJOR_GET(vers), - FW_HDR_FW_VER_MINOR_GET(vers), - FW_HDR_FW_VER_MICRO_GET(vers), - FW_HDR_FW_VER_BUILD_GET(vers)); + dev_info(dev, + "firmware upgraded to version %pI4 from %s\n", + &hdr->fw_ver, fw_file_name); else dev_err(dev, "firmware upgrade failed! 
err=%d\n", -ret); } else { @@ -1308,6 +1364,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = { "VLANinsertions ", "GROpackets ", "GROmerged ", + "WriteCoalSuccess ", + "WriteCoalFail ", }; static int get_sset_count(struct net_device *dev, int sset) @@ -1321,10 +1379,15 @@ static int get_sset_count(struct net_device *dev, int sset) } #define T4_REGMAP_SIZE (160 * 1024) +#define T5_REGMAP_SIZE (332 * 1024) static int get_regs_len(struct net_device *dev) { - return T4_REGMAP_SIZE; + struct adapter *adap = netdev2adap(dev); + if (is_t4(adap->chip)) + return T4_REGMAP_SIZE; + else + return T5_REGMAP_SIZE; } static int get_eeprom_len(struct net_device *dev) @@ -1398,11 +1461,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + u32 val1, val2; t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); + data += sizeof(struct queue_port_stats) / sizeof(u64); + if (!is_t4(adapter->chip)) { + t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7)); + val1 = t4_read_reg(adapter, SGE_STAT_TOTAL); + val2 = t4_read_reg(adapter, SGE_STAT_MATCH); + *data = val1 - val2; + data++; + *data = val2; + data++; + } else { + memset(data, 0, 2 * sizeof(u64)); + *data += 2; + } } /* @@ -1413,7 +1490,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, */ static inline unsigned int mk_adap_vers(const struct adapter *ap) { - return 4 | (ap->params.rev << 10) | (1 << 16); + return CHELSIO_CHIP_VERSION(ap->chip) | + (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16); } static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, @@ -1428,7 +1506,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { - static const unsigned int reg_ranges[] = { + static const unsigned int t4_reg_ranges[] = { 0x1008, 0x1108, 0x1180, 0x11b4, 0x11fc, 0x123c, @@ -1648,13 +1726,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 0x27e00, 0x27e04 }; + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x1148, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a9c, + 0x5b9c, 0x5bfc, + 0x6000, 0x6040, + 0x6058, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x90f8, + 0x9400, 0x9470, + 0x9600, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x11088, + 0x1109c, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c60, + 0x19c94, 0x19e10, + 0x19e50, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 
0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30100, 0x30144, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x31600, + 0x31608, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e04, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32208, 0x3223c, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b70, + 0x33000, 0x33048, + 0x33060, 0x3309c, + 0x330f0, 0x33148, + 0x33160, 0x3319c, + 0x331f0, 0x332e4, + 0x332f8, 0x333e4, + 0x333f8, 0x33448, + 0x33460, 0x3349c, + 0x334f0, 0x33548, + 0x33560, 0x3359c, + 0x335f0, 0x336e4, + 0x336f8, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33948, + 0x33960, 0x3399c, + 0x339f0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34100, 0x34144, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d50, + 0x35200, 0x3520c, + 0x35220, 0x35220, + 0x35240, 0x35240, + 0x35600, 0x35600, + 0x35608, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e04, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36208, 0x3623c, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b70, + 0x37000, 0x37048, + 0x37060, 0x3709c, + 0x370f0, 0x37148, + 0x37160, 0x3719c, + 0x371f0, 0x372e4, + 0x372f8, 0x373e4, + 0x373f8, 0x37448, + 0x37460, 0x3749c, + 0x374f0, 0x37548, + 0x37560, 0x3759c, + 0x375f0, 0x376e4, + 0x376f8, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37948, + 0x37960, 0x3799c, + 0x379f0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38100, 0x38144, + 0x38190, 
0x381d0, + 0x38200, 0x38318, + 0x38400, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a04, + 0x38a0c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38c24, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x39600, + 0x39608, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e04, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a208, 0x3a23c, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab70, + 0x3b000, 0x3b048, + 0x3b060, 0x3b09c, + 0x3b0f0, 0x3b148, + 0x3b160, 0x3b19c, + 0x3b1f0, 0x3b2e4, + 0x3b2f8, 0x3b3e4, + 0x3b3f8, 0x3b448, + 0x3b460, 0x3b49c, + 0x3b4f0, 0x3b548, + 0x3b560, 0x3b59c, + 0x3b5f0, 0x3b6e4, + 0x3b6f8, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 0x3b88c, + 0x3b8e8, 0x3b8ec, + 0x3b900, 0x3b948, + 0x3b960, 0x3b99c, + 0x3b9f0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca04, + 0x3ca0c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3cc24, + 0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d600, + 0x3d608, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de04, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e208, 0x3e23c, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb70, + 0x3f000, 0x3f048, + 0x3f060, 0x3f09c, + 0x3f0f0, 0x3f148, + 0x3f160, 0x3f19c, + 0x3f1f0, 0x3f2e4, + 0x3f2f8, 0x3f3e4, + 0x3f3f8, 0x3f448, + 0x3f460, 0x3f49c, + 0x3f4f0, 0x3f548, + 0x3f560, 0x3f59c, + 0x3f5f0, 0x3f6e4, + 0x3f6f8, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f948, + 0x3f960, 0x3f99c, + 0x3f9f0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40068, + 0x40080, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40298, + 0x402ac, 0x4033c, + 0x403f8, 0x403fc, + 0x41300, 0x413c4, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44078, + 0x440c0, 0x44278, + 0x442c0, 0x44478, + 0x444c0, 0x44678, + 0x446c0, 0x44878, + 0x448c0, 0x449fc, + 0x45000, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48068, + 0x48080, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48298, + 0x482ac, 0x4833c, + 0x483f8, 0x483fc, + 0x49300, 0x493c4, + 0x49400, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c078, + 0x4c0c0, 0x4c278, + 0x4c2c0, 0x4c478, + 0x4c4c0, 0x4c678, + 0x4c6c0, 0x4c878, + 0x4c8c0, 0x4c9fc, + 0x4d000, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f08c, + 0x4f200, 0x4f250, + 0x4f400, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 
0x51000, 0x5101c, + 0x51300, 0x51308, + }; + int i; struct adapter *ap = netdev2adap(dev); + static const unsigned int *reg_ranges; + int arr_size = 0, buf_size = 0; + + if (is_t4(ap->chip)) { + reg_ranges = &t4_reg_ranges[0]; + arr_size = ARRAY_SIZE(t4_reg_ranges); + buf_size = T4_REGMAP_SIZE; + } else { + reg_ranges = &t5_reg_ranges[0]; + arr_size = ARRAY_SIZE(t5_reg_ranges); + buf_size = T5_REGMAP_SIZE; + } regs->version = mk_adap_vers(ap); - memset(buf, 0, T4_REGMAP_SIZE); - for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + memset(buf, 0, buf_size); + for (i = 0; i < arr_size; i += 2) reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); } @@ -2363,8 +2880,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count, int ret, ofst; __be32 data[16]; - if (mem == MEM_MC) - ret = t4_mc_read(adap, pos, data, NULL); + if ((mem == MEM_MC) || (mem == MEM_MC1)) + ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL); else ret = t4_edc_read(adap, mem, pos, data, NULL); if (ret) @@ -2405,18 +2922,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name, static int setup_debugfs(struct adapter *adap) { int i; + u32 size; if (IS_ERR_OR_NULL(adap->debugfs_root)) return -1; i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); - if (i & EDRAM0_ENABLE) - add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); - if (i & EDRAM1_ENABLE) - add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); - if (i & EXT_MEM_ENABLE) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (i & EDRAM0_ENABLE) { + size = t4_read_reg(adap, MA_EDRAM0_BAR); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); + } + if (i & EDRAM1_ENABLE) { + size = t4_read_reg(adap, MA_EDRAM1_BAR); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); + } + if (is_t4(adap->chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(size)); + } else { + if (i & EXT_MEM_ENABLE) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM_SIZE_GET(size)); + } + if (i & EXT_MEM1_ENABLE) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM_SIZE_GET(size)); + } + } if (adap->l2t) debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, &t4_l2t_fops); @@ -2747,10 +3283,18 @@ EXPORT_SYMBOL(cxgb4_port_chan); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) { struct adapter *adap = netdev2adap(dev); - u32 v; + u32 v1, v2, lp_count, hp_count; - v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); - return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v); + v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); + if (is_t4(adap->chip)) { + lp_count = G_LP_COUNT(v1); + hp_count = G_HP_COUNT(v1); + } else { + lp_count = G_LP_COUNT_T5(v1); + hp_count = G_HP_COUNT_T5(v2); + } + return lpfifo ? 
lp_count : hp_count; } EXPORT_SYMBOL(cxgb4_dbfifo_count); @@ -2853,6 +3397,25 @@ out: } EXPORT_SYMBOL(cxgb4_sync_txq_pidx); +void cxgb4_disable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, + F_NOCOALESCE); +} +EXPORT_SYMBOL(cxgb4_disable_db_coalescing); + +void cxgb4_enable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); +} +EXPORT_SYMBOL(cxgb4_enable_db_coalescing); + static struct pci_driver cxgb4_driver; static void check_neigh_update(struct neighbour *neigh) @@ -2888,14 +3451,23 @@ static struct notifier_block cxgb4_netevent_nb = { static void drain_db_fifo(struct adapter *adap, int usecs) { - u32 v; + u32 v1, v2, lp_count, hp_count; do { + v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); + if (is_t4(adap->chip)) { + lp_count = G_LP_COUNT(v1); + hp_count = G_HP_COUNT(v1); + } else { + lp_count = G_LP_COUNT_T5(v1); + hp_count = G_HP_COUNT_T5(v2); + } + + if (lp_count == 0 && hp_count == 0) + break; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(usecs)); - v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); - if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0) - break; } while (1); } @@ -3004,24 +3576,62 @@ static void process_db_drop(struct work_struct *work) adap = container_of(work, struct adapter, db_drop_task); + if (is_t4(adap->chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); + drain_db_fifo(adap, 1); + recover_all_queues(adap); + enable_dbs(adap); + } else { + u32 dropped_db = t4_read_reg(adap, 0x010ac); + u16 qid = (dropped_db >> 15) & 0x1ffff; + u16 pidx_inc = dropped_db & 0x1fff; + unsigned int s_qpp; + unsigned short udb_density; + unsigned long qpshift; + int page; + u32 udb; + + dev_warn(adap->pdev_dev, + "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", + dropped_db, qid, + (dropped_db >> 14) & 1, + (dropped_db >> 13) & 1, + pidx_inc); + + drain_db_fifo(adap, 1); + + s_qpp = QUEUESPERPAGEPF1 * adap->fn; + udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, + SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); + qpshift = PAGE_SHIFT - ilog2(udb_density); + udb = qid << qpshift; + udb &= PAGE_MASK; + page = udb / PAGE_SIZE; + udb += (qid - (page * udb_density)) * 128; + + writel(PIDX(pidx_inc), adap->bar2 + udb + 8); + + /* Re-enable BAR2 WC */ + t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); + } + t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); - disable_dbs(adap); - notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); - drain_db_fifo(adap, 1); - recover_all_queues(adap); - enable_dbs(adap); } void t4_db_full(struct adapter *adap) { - t4_set_reg_field(adap, SGE_INT_ENABLE3, - DBFIFO_HP_INT | DBFIFO_LP_INT, 0); - queue_work(workq, &adap->db_full_task); + if (is_t4(adap->chip)) { + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, 0); + queue_work(workq, &adap->db_full_task); + } } void t4_db_dropped(struct adapter *adap) { - queue_work(workq, &adap->db_drop_task); + if (is_t4(adap->chip)) + queue_work(workq, &adap->db_drop_task); } static void uld_attach(struct adapter *adap, unsigned int uld) @@ -3566,17 +4176,27 @@ void t4_fatal_err(struct adapter *adap) static void setup_memwin(struct adapter *adap) { - u32 bar0; + u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base; bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional 
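Note how cxgb4_dbfifo_count() and drain_db_fifo() above read the doorbell FIFO occupancy differently per chip: on T4 both counts live in SGE_DBFIFO_STATUS, while on T5 the low-priority field widens and the high-priority count moves to SGE_DBFIFO_STATUS2. A standalone sketch, with the field macros copied from t4_regs.h and made-up register values standing in for the two reads:

#include <stdio.h>

#define S_LP_COUNT	0
#define M_LP_COUNT	0x7ffU
#define G_LP_COUNT(x)	(((x) >> S_LP_COUNT) & M_LP_COUNT)
#define S_HP_COUNT	16
#define M_HP_COUNT	0x7ffU
#define G_HP_COUNT(x)	(((x) >> S_HP_COUNT) & M_HP_COUNT)

#define M_LP_COUNT_T5		0x3ffffU
#define G_LP_COUNT_T5(x)	(((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
#define M_HP_COUNT_T5		0x3ffU
#define G_HP_COUNT_T5(x)	((x) & M_HP_COUNT_T5)

int main(void)
{
	unsigned int v1 = 0x00240012;	/* pretend SGE_DBFIFO_STATUS */
	unsigned int v2 = 0x00000007;	/* pretend SGE_DBFIFO_STATUS2 (T5) */

	printf("T4: lp=%u hp=%u\n", G_LP_COUNT(v1), G_HP_COUNT(v1));
	printf("T5: lp=%u hp=%u\n", G_LP_COUNT_T5(v1), G_HP_COUNT_T5(v2));
	return 0;
}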
*/ + if (is_t4(adap->chip)) { + mem_win0_base = bar0 + MEMWIN0_BASE; + mem_win1_base = bar0 + MEMWIN1_BASE; + mem_win2_base = bar0 + MEMWIN2_BASE; + } else { + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win0_base = MEMWIN0_BASE; + mem_win1_base = MEMWIN1_BASE_T5; + mem_win2_base = MEMWIN2_BASE_T5; + } t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), - (bar0 + MEMWIN0_BASE) | BIR(0) | + mem_win0_base | BIR(0) | WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), - (bar0 + MEMWIN1_BASE) | BIR(0) | + mem_win1_base | BIR(0) | WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), - (bar0 + MEMWIN2_BASE) | BIR(0) | + mem_win2_base | BIR(0) | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); } @@ -3745,6 +4365,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) unsigned long mtype = 0, maddr = 0; u32 finiver, finicsum, cfcsum; int ret, using_flash; + char *fw_config_file, fw_config_file_path[256]; /* * Reset device if necessary. @@ -3761,7 +4382,21 @@ static int adap_init0_config(struct adapter *adapter, int reset) * then use that. Otherwise, use the configuration file stored * in the adapter flash ... */ - ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev); + switch (CHELSIO_CHIP_VERSION(adapter->chip)) { + case CHELSIO_T4: + fw_config_file = FW_CFNAME; + break; + case CHELSIO_T5: + fw_config_file = FW5_CFNAME; + break; + default: + dev_err(adapter->pdev_dev, "Device %d is not supported\n", + adapter->pdev->device); + ret = -EINVAL; + goto bye; + } + + ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev); if (ret < 0) { using_flash = 1; mtype = FW_MEMTYPE_CF_FLASH; @@ -3877,6 +4512,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) if (ret < 0) goto bye; + sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file); /* * Return successfully and note that we're operating with parameters * not supplied by the driver, rather than from hard-wired @@ -3887,7 +4523,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) "Configuration File %s, version %#x, computed checksum %#x\n", (using_flash ? "in device FLASH" - : "/lib/firmware/" FW_CFNAME), + : fw_config_file_path), finiver, cfcsum); return 0; @@ -4814,7 +5450,8 @@ static void print_port_info(const struct net_device *dev) sprintf(bufp, "BASE-%s", base[pi->port_type]); netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", - adap->params.vpd.id, adap->params.rev, buf, + adap->params.vpd.id, + CHELSIO_CHIP_RELEASE(adap->params.rev), buf, is_offload(adap) ? "R" : "", adap->params.pci.width, spd, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? 
" MSI" : ""); @@ -4854,10 +5491,11 @@ static void free_some_resources(struct adapter *adapter) #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) +#define SEGMENT_SIZE 128 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int func, i, err; + int func, i, err, s_qpp, qpp, num_seg; struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; @@ -4934,7 +5572,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = t4_prep_adapter(adapter); if (err) - goto out_unmap_bar; + goto out_unmap_bar0; + + if (!is_t4(adapter->chip)) { + s_qpp = QUEUESPERPAGEPF1 * adapter->fn; + qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, + SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); + num_seg = PAGE_SIZE / SEGMENT_SIZE; + + /* Each segment size is 128B. Write coalescing is enabled only + * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the + * queue is less no of segments that can be accommodated in + * a page size. + */ + if (qpp > num_seg) { + dev_err(&pdev->dev, + "Incorrect number of egress queues per page\n"); + err = -EINVAL; + goto out_unmap_bar0; + } + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(&pdev->dev, "cannot map device bar2 region\n"); + err = -ENOMEM; + goto out_unmap_bar0; + } + } + setup_memwin(adapter); err = adap_init0(adapter); setup_memwin_rdma(adapter); @@ -5063,6 +5728,9 @@ sriov: out_free_dev: free_some_resources(adapter); out_unmap_bar: + if (!is_t4(adapter->chip)) + iounmap(adapter->bar2); + out_unmap_bar0: iounmap(adapter->regs); out_free_adapter: kfree(adapter); @@ -5113,6 +5781,8 @@ static void remove_one(struct pci_dev *pdev) free_some_resources(adapter); iounmap(adapter->regs); + if (!is_t4(adapter->chip)) + iounmap(adapter->bar2); kfree(adapter); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index e2bbc7f3e2de..4faf4d067ee7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, unsigned int skb_len, unsigned int pull_len); int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); int cxgb4_flush_eq_cache(struct net_device *dev); +void cxgb4_disable_db_coalescing(struct net_device *dev); +void cxgb4_enable_db_coalescing(struct net_device *dev); + #endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index fe9a2ea3588b..8b47b253e204 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { + u32 val; if (q->pend_cred >= 8) { + val = PIDX(q->pend_cred / 8); + if (!is_t4(adap->chip)) + val |= DBTYPE(1); wmb(); t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | - QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); + QID(q->cntxt_id) | val); q->pend_cred &= 7; } } @@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, *end = 0; } +/* This function copies 64 byte coalesced work request to + * memory mapped BAR2 space(user space writes). 
+ * For coalesced WR SGE, fetches data from the FIFO instead of from Host. + */ +static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) +{ + int count = 8; + + while (count) { + writeq(*src, dst); + src++; + dst++; + count--; + } +} + /** * ring_tx_db - check and potentially ring a Tx queue's doorbell * @adap: the adapter @@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, */ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { + unsigned int *wr, index; + wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), - QID(q->cntxt_id) | PIDX(n)); + if (is_t4(adap->chip)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); + } else { + if (n == 1) { + index = q->pidx ? (q->pidx - 1) : (q->size - 1); + wr = (unsigned int *)&q->desc[index]; + cxgb_pio_copy((u64 __iomem *) + (adap->bar2 + q->udb + 64), + (u64 *)wr); + } else + writel(n, adap->bar2 + q->udb + 8); + wmb(); + } } q->db_pidx = q->pidx; spin_unlock(&q->db_lock); @@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap, const struct pkt_gl *gl) { struct sk_buff *skb; - struct cpl_trace_pkt *p; skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); if (unlikely(!skb)) { @@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap, return 0; } - p = (struct cpl_trace_pkt *)skb->data; - __skb_pull(skb, sizeof(*p)); + if (is_t4(adap->chip)) + __skb_pull(skb, sizeof(struct cpl_trace_pkt)); + else + __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); + skb_reset_mac_header(skb); skb->protocol = htons(0xffff); skb->dev = adap->port[0]; @@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_rx_pkt *pkt; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); struct sge *s = &q->adap->sge; + int cpl_trace_pkt = is_t4(q->adap->chip) ? + CPL_TRACE_PKT : CPL_TRACE_PKT_T5; - if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) + if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); pkt = (const struct cpl_rx_pkt *)rsp; @@ -2143,11 +2181,27 @@ err: static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) { + q->cntxt_id = id; + if (!is_t4(adap->chip)) { + unsigned int s_qpp; + unsigned short udb_density; + unsigned long qpshift; + int page; + + s_qpp = QUEUESPERPAGEPF1 * adap->fn; + udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap, + SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)); + qpshift = PAGE_SHIFT - ilog2(udb_density); + q->udb = q->cntxt_id << qpshift; + q->udb &= PAGE_MASK; + page = q->udb / PAGE_SIZE; + q->udb += (q->cntxt_id - (page * udb_density)) * 128; + } + q->in_use = 0; q->cidx = q->pidx = 0; q->stops = q->restarts = 0; q->stat = (void *)&q->desc[q->size]; - q->cntxt_id = id; spin_lock_init(&q->db_lock); adap->sge.egr_map[id - adap->sge.egr_start] = q; } @@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap) * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows * and generate an interrupt when this occurs so we can recover. 
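The user-doorbell (udb) arithmetic shared by init_txq() and process_db_drop() above is worth unpacking: SGE_EGRESS_QUEUES_PER_PAGE_PF says how many 128-byte doorbell slots share one BAR2 page, so a queue's slot is found by first locating its page and then its slot within that page. A standalone walk-through with an assumed density of 8 queues per 4 KB page (the register read is replaced by that assumption; the arithmetic itself mirrors the patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define UDB_SLOT_SIZE	128	/* each egress queue owns a 128B slot */

static int ilog2(unsigned long v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long udb_density = 8;	/* assumed queues-per-page value */
	unsigned long qpshift = PAGE_SHIFT - ilog2(udb_density);
	unsigned int qid = 21;
	unsigned long udb, page, slot;

	udb = (unsigned long)qid << qpshift;
	udb &= PAGE_MASK;			/* page holding this queue */
	page = udb / PAGE_SIZE;
	slot = qid - page * udb_density;	/* slot within that page */
	udb += slot * UDB_SLOT_SIZE;

	printf("qid %u -> BAR2 offset 0x%lx (page %lu, slot %lu)\n",
	       qid, udb, page, slot);
	return 0;
}

With these numbers queue 21 maps to page 2, slot 5, i.e. BAR2 offset 0x2280; the "+ 8" and "+ 64" offsets that ring_tx_db() writes to in the hunk above then address locations inside that 128-byte slot.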
*/ - t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, - V_HP_INT_THRESH(M_HP_INT_THRESH) | - V_LP_INT_THRESH(M_LP_INT_THRESH), - V_HP_INT_THRESH(dbfifo_int_thresh) | - V_LP_INT_THRESH(dbfifo_int_thresh)); + if (is_t4(adap->chip)) { + t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, + V_HP_INT_THRESH(M_HP_INT_THRESH) | + V_LP_INT_THRESH(M_LP_INT_THRESH), + V_HP_INT_THRESH(dbfifo_int_thresh) | + V_LP_INT_THRESH(dbfifo_int_thresh)); + } else { + t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, + V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5), + V_LP_INT_THRESH_T5(dbfifo_int_thresh)); + t4_set_reg_field(adap, SGE_DBFIFO_STATUS2, + V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5), + V_HP_INT_THRESH_T5(dbfifo_int_thresh)); + } t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, F_ENABLE_DROP); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8049268ce0f2..d02d4e8c4417 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, * t4_mc_read - read from MC through backdoor accesses * @adap: the adapter * @addr: address of first byte requested + * @idx: which MC to access * @data: 64 bytes of data containing the requested address * @ecc: where to store the corresponding 64-bit ECC word * @@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, * that covers the requested address @addr. If @parity is not %NULL it * is assigned the 64-bit ECC word for the read data. */ -int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) +int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; + u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; + u32 mc_bist_status_rdata, mc_bist_data_pattern; + + if (is_t4(adap->chip)) { + mc_bist_cmd = MC_BIST_CMD; + mc_bist_cmd_addr = MC_BIST_CMD_ADDR; + mc_bist_cmd_len = MC_BIST_CMD_LEN; + mc_bist_status_rdata = MC_BIST_STATUS_RDATA; + mc_bist_data_pattern = MC_BIST_DATA_PATTERN; + } else { + mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx); + mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx); + mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx); + mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx); + mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx); + } - if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) + if (t4_read_reg(adap, mc_bist_cmd) & START_BIST) return -EBUSY; - t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); - t4_write_reg(adap, MC_BIST_CMD_LEN, 64); - t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); - t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | + t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU); + t4_write_reg(adap, mc_bist_cmd_len, 64); + t4_write_reg(adap, mc_bist_data_pattern, 0xc); + t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1)); - i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); + i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1); if (i) return i; -#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) +#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); @@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; + u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; + u32 
edc_bist_cmd_data_pattern, edc_bist_status_rdata; + + if (is_t4(adap->chip)) { + edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); + edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); + edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); + edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN, + idx); + edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA, + idx); + } else { + edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx); + edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx); + edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx); + edc_bist_cmd_data_pattern = + EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx); + edc_bist_status_rdata = + EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx); + } - idx *= EDC_STRIDE; - if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) + if (t4_read_reg(adap, edc_bist_cmd) & START_BIST) return -EBUSY; - t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); - t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); - t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); - t4_write_reg(adap, EDC_BIST_CMD + idx, + t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU); + t4_write_reg(adap, edc_bist_cmd_len, 64); + t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); + t4_write_reg(adap, edc_bist_cmd, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); - i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); + i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1); if (i) return i; -#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i)) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); @@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) { int i; + u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); /* * Setup offset into PCIE memory window. Address must be a @@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) * values.) 
*/ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, - addr & ~(MEMWIN0_APERTURE - 1)); + (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf); t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ @@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf, int dir) { u32 pos, start, end, offset, memoffset; + u32 edc_size, mc_size; int ret = 0; __be32 *data; @@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, if (!data) return -ENOMEM; - /* - * Offset into the region of memory which is being accessed + /* Offset into the region of memory which is being accessed * MEM_EDC0 = 0 * MEM_EDC1 = 1 - * MEM_MC = 2 + * MEM_MC = 2 -- T4 + * MEM_MC0 = 2 -- For T5 + * MEM_MC1 = 3 -- For T5 */ - memoffset = (mtype * (5 * 1024 * 1024)); + edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)); + if (mtype != MEM_MC1) + memoffset = (mtype * (edc_size * 1024 * 1024)); + else { + mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, + MA_EXT_MEMORY_BAR)); + memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } /* Determine the PCIE_MEM_ACCESS_OFFSET */ addr = addr + memoffset; @@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, } #define EEPROM_STAT_ADDR 0x7bfc -#define VPD_LEN 512 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 +#define VPD_LEN 1024 /** * t4_seeprom_wp - enable/disable EEPROM write protection @@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter) { u32 api_vers[2]; int ret, major, minor, micro; + int exp_major, exp_minor, exp_micro; ret = get_fw_version(adapter, &adapter->params.fw_vers); if (!ret) @@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter) major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); + + switch (CHELSIO_CHIP_VERSION(adapter->chip)) { + case CHELSIO_T4: + exp_major = FW_VERSION_MAJOR; + exp_minor = FW_VERSION_MINOR; + exp_micro = FW_VERSION_MICRO; + break; + case CHELSIO_T5: + exp_major = FW_VERSION_MAJOR_T5; + exp_minor = FW_VERSION_MINOR_T5; + exp_micro = FW_VERSION_MICRO_T5; + break; + default: + dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n", + adapter->chip); + return -EINVAL; + } + memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); - if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + if (major != exp_major) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", - major, FW_VERSION_MAJOR); + major, exp_major); return -EINVAL; } - if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + if (minor == exp_minor && micro == exp_micro) return 0; /* perfect match */ /* Minor/micro version mismatch. Report it but often it's OK. 
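t4_memory_rw() above now derives the flat offset of each memory target from the actual EDC size instead of a hard-wired 5 MB: EDC0, EDC1 and MC0 are stacked edc_size apart, and on T5 MC1 sits after the whole of MC0. A standalone rendering of that layout (the two sizes stand in for the MA_EDRAM0_BAR and MA_EXT_MEMORY_BAR register reads and are assumptions):

#include <stdio.h>

enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };

int main(void)
{
	unsigned int edc_size = 5;	/* assumed: each EDC region is 5 MB */
	unsigned int mc_size = 256;	/* assumed: MC0 is 256 MB */
	int mtype;

	for (mtype = MEM_EDC0; mtype <= MEM_MC1; mtype++) {
		unsigned int memoffset;

		/* same computation as the t4_memory_rw() hunk above */
		if (mtype != MEM_MC1)
			memoffset = mtype * (edc_size * 1024 * 1024);
		else
			memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;

		printf("mtype %d starts at offset %u MB\n",
		       mtype, memoffset >> 20);
	}
	return 0;
}

With the assumed sizes this prints offsets of 0, 5, 10 and 266 MB, so a T5 MC1 access lands past both EDC regions and all of MC0.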
*/ @@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter) { 0 } }; + static struct intr_info t5_pcie_intr_info[] = { + { MSTGRPPERR, "Master Response Read Queue parity error", + -1, 1 }, + { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, + { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", + -1, 1 }, + { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", + -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DREQWRPERR, "PCI DMA channel write request parity error", + -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, + { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", + -1, 1 }, + { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 }, + { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, + { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, + { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, + { READRSPERR, "Outbound read error", -1, 0 }, + { 0 } + }; + int fat; fat = t4_handle_intr_status(adapter, @@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter) t4_handle_intr_status(adapter, PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, pcie_port_intr_info) + - t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, + is_t4(adapter->chip) ? + pcie_intr_info : t5_pcie_intr_info); + if (fat) t4_fatal_err(adapter); } @@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap) */ static void xgmac_intr_handler(struct adapter *adap, int port) { - u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + u32 v, int_cause_reg; + + if (is_t4(adap->chip)) + int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE); + else + int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE); + + v = t4_read_reg(adap, int_cause_reg); v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; if (!v) @@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) u32 bgmap = get_mps_bg_map(adap, idx); #define GET_STAT(name) \ - t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) + t4_read_reg64(adap, \ + (is_t4(adap->chip) ? 
PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ + T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) p->tx_octets = GET_STAT(TX_PORT_BYTES); @@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) void t4_wol_magic_enable(struct adapter *adap, unsigned int port, const u8 *addr) { + u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; + + if (is_t4(adap->chip)) { + mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); + mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); + port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); + } else { + mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); + mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); + port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); + } + if (addr) { - t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), + t4_write_reg(adap, mag_id_reg_l, (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]); - t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), + t4_write_reg(adap, mag_id_reg_h, (addr[0] << 8) | addr[1]); } - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, + t4_set_reg_field(adap, port_cfg_reg, MAGICEN, addr ? MAGICEN : 0); } @@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, u64 mask0, u64 mask1, unsigned int crc, bool enable) { int i; + u32 port_cfg_reg; + + if (is_t4(adap->chip)) + port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2); + else + port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2); if (!enable) { - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), - PATEN, 0); + t4_set_reg_field(adap, port_cfg_reg, PATEN, 0); return 0; } if (map > 0xff) return -EINVAL; -#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) +#define EPIO_REG(name) \ + (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \ + T5_PORT_REG(port, MAC_PORT_EPIO_##name)) t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); t4_write_reg(adap, EPIO_REG(DATA2), mask1); @@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, * @addr: address of first byte requested aligned on 32b. * @data: len bytes to hold the data read * @len: amount of data to read from window. Must be <= - * MEMWIN0_APERATURE after adjusting for 16B alignment - * requirements of the the memory window. + * MEMWIN0_APERATURE after adjusting for 16B for T4 and + * 128B for T5 alignment requirements of the the memory window. * * Read len bytes of data from MC starting at @addr. */ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) { - int i; - int off; + int i, off; + u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn); - /* - * Align on a 16B boundary. + /* Align on a 2KB boundary. */ - off = addr & 15; + off = addr & MEMWIN0_APERTURE; if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) return -EINVAL; - t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); + t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, + (addr & ~MEMWIN0_APERTURE) | win_pf); t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); for (i = 0; i < len; i += 4) @@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, int i, ret; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p; + unsigned int max_naddr = is_t4(adap->chip) ? 
+ NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; if (naddr > 7) return -EINVAL; @@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); if (idx) - idx[i] = index >= NEXACT_MAC ? 0xffff : index; - if (index < NEXACT_MAC) + idx[i] = index >= max_naddr ? 0xffff : index; + if (index < max_naddr) ret++; else if (hash) *hash |= (1ULL << hash_mac_addr(addr[i])); @@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; + unsigned int max_mac_addr = is_t4(adap->chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; @@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); - if (ret >= NEXACT_MAC) + if (ret >= max_mac_addr) ret = -ENOMEM; } return ret; @@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap) */ int t4_prep_adapter(struct adapter *adapter) { - int ret; + int ret, ver; + uint16_t device_id; ret = t4_wait_dev_ready(adapter); if (ret < 0) @@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter) return ret; } + /* Retrieve adapter's device ID + */ + pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); + ver = device_id >> 12; + switch (ver) { + case CHELSIO_T4: + adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, + adapter->params.rev); + break; + case CHELSIO_T5: + adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, + adapter->params.rev); + break; + default: + dev_err(adapter->pdev_dev, "Device %d is not supported\n", + device_id); + return -EINVAL; + } + + /* Reassign the updated revision field */ + adapter->params.rev = adapter->chip; + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index f534ed7e10e9..1d1623be9f1e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -47,7 +47,6 @@ enum { TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ - NEXACT_MAC = 336, /* # of exact MAC address filters */ L2T_SIZE = 4096, /* # of L2T entries */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 261d17703adc..47656ac1ac25 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -74,6 +74,7 @@ enum { CPL_PASS_ESTABLISH = 0x41, CPL_RX_DATA_DDP = 0x42, CPL_PASS_ACCEPT_REQ = 0x44, + CPL_TRACE_PKT_T5 = 0x48, CPL_RDMA_READ_REQ = 0x60, @@ -287,6 +288,23 @@ struct cpl_act_open_req { __be32 opt2; }; +#define S_FILTER_TUPLE 24 +#define M_FILTER_TUPLE 0xFFFFFFFFFF +#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE) +#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE) +struct cpl_t5_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; +}; + struct cpl_act_open_req6 { WR_HDR; union opcode_tid 
ot; @@ -566,6 +584,11 @@ struct cpl_rx_pkt { #define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) #define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) +#define S_RX_T5_ETHHDR_LEN 0 +#define M_RX_T5_ETHHDR_LEN 0x3F +#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN) +#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN) + #define S_RX_MACIDX 8 #define M_RX_MACIDX 0x1FF #define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) @@ -612,6 +635,28 @@ struct cpl_trace_pkt { __be64 tstamp; }; +struct cpl_t5_trace_pkt { + __u8 opcode; + __u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 runt:4; + __u8 filter_hit:4; + __u8:6; + __u8 err:1; + __u8 trunc:1; +#else + __u8 filter_hit:4; + __u8 runt:4; + __u8 trunc:1; + __u8 err:1; + __u8:6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; + __be64 rsvd1; +}; + struct cpl_l2t_write_req { WR_HDR; union opcode_tid ot; @@ -742,4 +787,12 @@ struct ulp_mem_io { #define ULP_MEMIO_LOCK(x) ((x) << 31) }; +#define S_T5_ULP_MEMIO_IMM 23 +#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM) +#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U) + +#define S_T5_ULP_MEMIO_ORDER 22 +#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) +#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 83ec5f7844ac..ef146c0ba481 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -68,9 +68,14 @@ #define QID_SHIFT 15 #define QID(x) ((x) << QID_SHIFT) #define DBPRIO(x) ((x) << 14) +#define DBTYPE(x) ((x) << 13) #define PIDX_MASK 0x00003fffU #define PIDX_SHIFT 0 #define PIDX(x) ((x) << PIDX_SHIFT) +#define S_PIDX_T5 0 +#define M_PIDX_T5 0x1fffU +#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) + #define SGE_PF_GTS 0x4 #define INGRESSQID_MASK 0xffff0000U @@ -152,6 +157,8 @@ #define QUEUESPERPAGEPF0_MASK 0x0000000fU #define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) +#define QUEUESPERPAGEPF1 4 + #define SGE_INT_CAUSE1 0x1024 #define SGE_INT_CAUSE2 0x1030 #define SGE_INT_CAUSE3 0x103c @@ -234,6 +241,10 @@ #define SGE_DOORBELL_CONTROL 0x10a8 #define ENABLE_DROP (1 << 13) +#define S_NOCOALESCE 26 +#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE) +#define F_NOCOALESCE V_NOCOALESCE(1U) + #define SGE_TIMER_VALUE_0_AND_1 0x10b8 #define TIMERVALUE0_MASK 0xffff0000U #define TIMERVALUE0_SHIFT 16 @@ -272,17 +283,36 @@ #define S_HP_INT_THRESH 28 #define M_HP_INT_THRESH 0xfU #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) +#define S_LP_INT_THRESH_T5 18 +#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5) +#define M_LP_COUNT_T5 0x3ffffU +#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5) #define M_HP_COUNT 0x7ffU #define S_HP_COUNT 16 #define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) #define S_LP_INT_THRESH 12 #define M_LP_INT_THRESH 0xfU +#define M_LP_INT_THRESH_T5 0xfffU #define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) #define M_LP_COUNT 0x7ffU #define S_LP_COUNT 0 #define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) #define A_SGE_DBFIFO_STATUS 0x10a4 +#define SGE_STAT_TOTAL 0x10e4 +#define SGE_STAT_MATCH 0x10e8 + +#define SGE_STAT_CFG 0x10ec +#define S_STATSOURCE_T5 9 +#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5) + +#define SGE_DBFIFO_STATUS2 0x1118 +#define M_HP_COUNT_T5 0x3ffU +#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5) +#define S_HP_INT_THRESH_T5 10 +#define M_HP_INT_THRESH_T5 0xfU 
+#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5) + #define S_ENABLE_DROP 13 #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) #define F_ENABLE_DROP V_ENABLE_DROP(1U) @@ -331,8 +361,27 @@ #define MSIADDRHPERR 0x00000002U #define MSIADDRLPERR 0x00000001U +#define READRSPERR 0x20000000U +#define TRGT1GRPPERR 0x10000000U +#define IPSOTPERR 0x08000000U +#define IPRXDATAGRPPERR 0x02000000U +#define IPRXHDRGRPPERR 0x01000000U +#define MAGRPPERR 0x00400000U +#define VFIDPERR 0x00200000U +#define HREQWRPERR 0x00010000U +#define DREQWRPERR 0x00002000U +#define MSTTAGQPERR 0x00000400U +#define PIOREQGRPPERR 0x00000100U +#define PIOCPLGRPPERR 0x00000080U +#define MSIXSTIPERR 0x00000004U +#define MSTTIMEOUTPERR 0x00000002U +#define MSTGRPPERR 0x00000001U + #define PCIE_NONFAT_ERR 0x3010 #define PCIE_MEM_ACCESS_BASE_WIN 0x3068 +#define S_PCIEOFST 10 +#define M_PCIEOFST 0x3fffffU +#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST) #define PCIEOFST_MASK 0xfffffc00U #define BIR_MASK 0x00000300U #define BIR_SHIFT 8 @@ -342,6 +391,9 @@ #define WINDOW(x) ((x) << WINDOW_SHIFT) #define PCIE_MEM_ACCESS_OFFSET 0x306c +#define S_PFNUM 0 +#define V_PFNUM(x) ((x) << S_PFNUM) + #define PCIE_FW 0x30b8 #define PCIE_FW_ERR 0x80000000U #define PCIE_FW_INIT 0x40000000U @@ -407,12 +459,18 @@ #define MC_BIST_STATUS_RDATA 0x7688 +#define MA_EDRAM0_BAR 0x77c0 +#define MA_EDRAM1_BAR 0x77c4 +#define EDRAM_SIZE_MASK 0xfffU +#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK) + #define MA_EXT_MEMORY_BAR 0x77c8 #define EXT_MEM_SIZE_MASK 0x00000fffU #define EXT_MEM_SIZE_SHIFT 0 #define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) #define MA_TARGET_MEM_ENABLE 0x77d8 +#define EXT_MEM1_ENABLE 0x00000010U #define EXT_MEM_ENABLE 0x00000004U #define EDRAM1_ENABLE 0x00000002U #define EDRAM0_ENABLE 0x00000001U @@ -431,6 +489,7 @@ #define MA_PCIE_FW 0x30b8 #define MA_PARITY_ERROR_STATUS 0x77f4 +#define MA_EXT_MEMORY1_BAR 0x7808 #define EDC_0_BASE_ADDR 0x7900 #define EDC_BIST_CMD 0x7904 @@ -801,6 +860,15 @@ #define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c #define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 #define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MAC_PORT_CFG2 0x818 +#define MAC_PORT_MAGIC_MACID_LO 0x824 +#define MAC_PORT_MAGIC_MACID_HI 0x828 +#define MAC_PORT_EPIO_DATA0 0x8c0 +#define MAC_PORT_EPIO_DATA1 0x8c4 +#define MAC_PORT_EPIO_DATA2 0x8c8 +#define MAC_PORT_EPIO_DATA3 0x8cc +#define MAC_PORT_EPIO_OP 0x8d0 + #define MPS_CMN_CTL 0x9000 #define NUMPORTS_MASK 0x00000003U #define NUMPORTS_SHIFT 0 @@ -1063,6 +1131,7 @@ #define ADDRESS_SHIFT 0 #define ADDRESS(x) ((x) << ADDRESS_SHIFT) +#define MAC_PORT_INT_CAUSE 0x8dc #define XGMAC_PORT_INT_CAUSE 0x10dc #define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 @@ -1101,4 +1170,33 @@ #define V_PORT(x) ((x) << S_PORT) #define F_PORT V_PORT(1U) +#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 +#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 + +#define T5_PORT0_BASE 0x30000 +#define T5_PORT_STRIDE 0x4000 +#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE) +#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg)) + +#define MC_0_BASE_ADDR 0x40000 +#define MC_1_BASE_ADDR 0x48000 +#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR) +#define MC_REG(reg, idx) (reg + MC_STRIDE * idx) + +#define MC_P_BIST_CMD 0x41400 +#define MC_P_BIST_CMD_ADDR 0x41404 +#define MC_P_BIST_CMD_LEN 0x41408 +#define MC_P_BIST_DATA_PATTERN 0x4140c +#define MC_P_BIST_STATUS_RDATA 0x41488 +#define EDC_T50_BASE_ADDR 0x50000 +#define EDC_H_BIST_CMD 0x50004 +#define EDC_H_BIST_CMD_ADDR 0x50008 
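/*
 * (Editorial aside, not part of the patch.) The new T5 stride macros above
 * turn an instance index plus an instance-0 register offset into an
 * absolute address, e.g.:
 *
 *   MC_REG(MC_P_BIST_CMD, 1)      == 0x41400 + 0x8000 * 1         == 0x49400
 *   T5_PORT_REG(2, MAC_PORT_CFG2) == 0x30000 + 2 * 0x4000 + 0x818 == 0x38818
 *
 * where MC_STRIDE == MC_1_BASE_ADDR - MC_0_BASE_ADDR == 0x8000 and
 * MAC_PORT_CFG2 is defined earlier in this same header.
 */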
+#define EDC_H_BIST_CMD_LEN 0x5000c +#define EDC_H_BIST_DATA_PATTERN 0x50010 +#define EDC_H_BIST_STATUS_RDATA 0x50028 + +#define EDC_T51_BASE_ADDR 0x50800 +#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) +#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) + #endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a0dcccd846c9..93444325b1e8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr { __be16 vlantci; }; -#define FW_CMD_MAX_TIMEOUT 3000 +#define FW_CMD_MAX_TIMEOUT 10000 /* * If a host driver does a HELLO and discovers that there's already a MASTER diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 68eaa9c88c7d..be5c7ef6ca93 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -344,6 +344,7 @@ struct adapter { unsigned long registered_device_map; unsigned long open_device_map; unsigned long flags; + enum chip_type chip; struct adapter_params params; /* queue and interrupt resources */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 56b46ab2d4c5..7fcac2003769 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -54,8 +54,8 @@ /* * Generic information about the driver. */ -#define DRV_VERSION "1.0.0" -#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver" +#define DRV_VERSION "2.0.0-ko" +#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver" /* * Module Parameters. @@ -1050,7 +1050,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter) /* * Chip version 4, revision 0x3f (cxgb4vf). */ - return 4 | (0x3f << 10); + return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10); } /* @@ -2099,6 +2099,15 @@ static int adap_init0(struct adapter *adapter) return err; } + switch (adapter->pdev->device >> 12) { + case CHELSIO_T4: + adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + break; + case CHELSIO_T5: + adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); + break; + } + /* * Grab basic operational parameters. 
These will predominantly have * been set up by the Physical Function Driver or will be hard coded @@ -2888,6 +2897,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0x480a, 0), /* T404-bt */ CH_DEVICE(0x480d, 0), /* T480-cr */ CH_DEVICE(0x480e, 0), /* T440-lp-cr */ + CH_DEVICE(0x5800, 0), /* T580-dbg */ + CH_DEVICE(0x5801, 0), /* T520-cr */ + CH_DEVICE(0x5802, 0), /* T522-cr */ + CH_DEVICE(0x5803, 0), /* T540-cr */ + CH_DEVICE(0x5804, 0), /* T520-bch */ + CH_DEVICE(0x5805, 0), /* T540-bch */ + CH_DEVICE(0x5806, 0), /* T540-ch */ + CH_DEVICE(0x5807, 0), /* T520-so */ + CH_DEVICE(0x5808, 0), /* T520-cx */ + CH_DEVICE(0x5809, 0), /* T520-bt */ + CH_DEVICE(0x580a, 0), /* T504-bt */ + CH_DEVICE(0x580b, 0), /* T520-sr */ + CH_DEVICE(0x580c, 0), /* T504-bt */ + CH_DEVICE(0x580d, 0), /* T580-cr */ + CH_DEVICE(0x580e, 0), /* T540-lp-cr */ + CH_DEVICE(0x580f, 0), /* Amsterdam */ + CH_DEVICE(0x5810, 0), /* T580-lp-cr */ + CH_DEVICE(0x5811, 0), /* T520-lp-cr */ + CH_DEVICE(0x5812, 0), /* T560-cr */ + CH_DEVICE(0x5813, 0), /* T580-cr */ { 0, } }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 9488032d6d2d..61dfb2a47929 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) */ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { + u32 val; + /* * The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers * of multiples of Free List Entries per Egress Queue Units ... */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { + val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT); + if (!is_t4(adapter->chip)) + val |= DBTYPE(1); wmb(); t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, DBPRIO(1) | - QID(fl->cntxt_id) | - PIDX(fl->pend_cred / FL_PER_EQ_UNIT)); + QID(fl->cntxt_id) | val); fl->pend_cred %= FL_PER_EQ_UNIT; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 283f9d0d37fd..53cbfed21d0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -38,6 +38,25 @@ #include "../cxgb4/t4fw_api.h" +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_VERSION(code) ((code) >> 4) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A3, + + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_FIRST_REV = T5_A1, + T5_LAST_REV = T5_A1, +}; + /* * The "len16" field of a Firmware Command Structure ... 
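 * (Editorial aside, not part of the patch: CHELSIO_CHIP_CODE() above packs
 * the chip version into the high nibble and the revision into the low
 * nibble of one byte, so CHELSIO_CHIP_CODE(CHELSIO_T5, 0) == 0x50,
 * CHELSIO_CHIP_VERSION(0x50) == 0x5 and CHELSIO_CHIP_RELEASE(0x50) == 0x0;
 * the is_t4() helper added further down only range-checks that packed code
 * against T4_FIRST_REV..T4_LAST_REV.)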
*/ @@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); } +static inline int is_t4(enum chip_type chip) +{ + return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV); +} + int t4vf_wait_dev_ready(struct adapter *); int t4vf_port_init(struct adapter *, int); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 7127c7b9efde..9f96dc3bb112 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; + unsigned int max_naddr = is_t4(adapter->chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; - if (naddr > FW_CLS_TCAM_NUM_ENTRIES) + if (naddr > max_naddr) return -EINVAL; for (offset = 0; offset < naddr; /**/) { @@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, if (idx) idx[offset+i] = - (index >= FW_CLS_TCAM_NUM_ENTRIES + (index >= max_naddr ? 0xffff : index); - if (index < FW_CLS_TCAM_NUM_ENTRIES) + if (index < max_naddr) nfilters++; else if (hash) *hash |= (1ULL << hash_mac_addr(addr[offset+i])); @@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); + unsigned int max_naddr = is_t4(adapter->chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; /* * If this is a new allocation, determine whether it should be @@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, if (ret == 0) { p = &rpl.u.exact[0]; ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); - if (ret >= FW_CLS_TCAM_NUM_ENTRIES) + if (ret >= max_naddr) ret = -ENOMEM; } return ret; diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 138446957786..19f642a45f40 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -101,23 +101,6 @@ static char version[] __initdata = * them to system IRQ numbers. This mapping is card specific and is set to * the configuration of the Cirrus Eval board for this chip. 
*/ -#if defined(CONFIG_MACH_IXDP2351) -#define CS89x0_NONISA_IRQ -static unsigned int netcard_portlist[] __used __initdata = { - IXDP2351_VIRT_CS8900_BASE, 0 -}; -static unsigned int cs8900_irq_map[] = { - IRQ_IXDP2351_CS8900, 0, 0, 0 -}; -#elif defined(CONFIG_ARCH_IXDP2X01) -#define CS89x0_NONISA_IRQ -static unsigned int netcard_portlist[] __used __initdata = { - IXDP2X01_CS8900_VIRT_BASE, 0 -}; -static unsigned int cs8900_irq_map[] = { - IRQ_IXDP2X01_CS8900, 0, 0, 0 -}; -#else #ifndef CONFIG_CS89x0_PLATFORM static unsigned int netcard_portlist[] __used __initdata = { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, @@ -127,7 +110,6 @@ static unsigned int cs8900_irq_map[] = { 10, 11, 12, 5 }; #endif -#endif #if DEBUGGING static unsigned int net_debug = DEBUGGING; @@ -210,32 +192,6 @@ static int __init media_fn(char *str) __setup("cs89x0_media=", media_fn); #endif -#if defined(CONFIG_MACH_IXDP2351) -static u16 -readword(unsigned long base_addr, int portno) -{ - return __raw_readw(base_addr + (portno << 1)); -} - -static void -writeword(unsigned long base_addr, int portno, u16 value) -{ - __raw_writew(value, base_addr + (portno << 1)); -} -#elif defined(CONFIG_ARCH_IXDP2X01) -static u16 -readword(unsigned long base_addr, int portno) -{ - return __raw_readl(base_addr + (portno << 1)); -} - -static void -writeword(unsigned long base_addr, int portno, u16 value) -{ - __raw_writel(value, base_addr + (portno << 1)); -} -#endif - static void readwords(struct net_local *lp, int portno, void *buf, int length) { u8 *buf8 = (u8 *)buf; @@ -478,9 +434,6 @@ dma_rx(struct net_device *dev) /* Malloc up new buffer. */ skb = netdev_alloc_skb(dev, length + 2); if (skb == NULL) { - /* I don't think we want to do this to a stressed system */ - cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n", - dev->name); dev->stats.rx_dropped++; /* AKPM: advance bp to the next frame */ @@ -731,9 +684,6 @@ net_rx(struct net_device *dev) /* Malloc up new buffer. 
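 * (Editorial aside, not part of the patch: the "length + 2" in the
 * allocation below is the usual NET_IP_ALIGN trick: with two bytes
 * reserved ahead of the 14-byte Ethernet header, the IP header that
 * follows it lands on a 4-byte boundary.)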
*/ skb = netdev_alloc_skb(dev, length + 2); if (skb == NULL) { -#if 0 /* Again, this seems a cruel thing to do */ - pr_warn("%s: Memory squeeze, dropping packet\n", dev->name); -#endif dev->stats.rx_dropped++; return; } @@ -908,7 +858,7 @@ net_open(struct net_device *dev) goto bad_out; } } else { -#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM) +#if !defined(CONFIG_CS89x0_PLATFORM) if (((1 << dev->irq) & lp->irq_map) == 0) { pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", dev->name, dev->irq, lp->irq_map); @@ -1321,9 +1271,7 @@ static const struct net_device_ops net_ops = { static void __init reset_chip(struct net_device *dev) { #if !defined(CONFIG_MACH_MX31ADS) -#if !defined(CS89x0_NONISA_IRQ) struct net_local *lp = netdev_priv(dev); -#endif /* CS89x0_NONISA_IRQ */ int reset_start_time; writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); @@ -1331,7 +1279,6 @@ static void __init reset_chip(struct net_device *dev) /* wait 30 ms */ msleep(30); -#if !defined(CS89x0_NONISA_IRQ) if (lp->chip_type != CS8900) { /* Hardware problem requires PNP registers to be reconfigured after a reset */ iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT); @@ -1344,7 +1291,6 @@ static void __init reset_chip(struct net_device *dev) iowrite8((dev->mem_start >> 8) & 0xff, lp->virt_addr + DATA_PORT + 1); } -#endif /* CS89x0_NONISA_IRQ */ /* Wait until the chip is reset */ reset_start_time = jiffies; @@ -1579,9 +1525,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) i = lp->isa_config & INT_NO_MASK; #ifndef CONFIG_CS89x0_PLATFORM if (lp->chip_type == CS8900) { -#ifdef CS89x0_NONISA_IRQ - i = cs8900_irq_map[0]; -#else /* Translate the IRQ using the IRQ mapping table. */ if (i >= ARRAY_SIZE(cs8900_irq_map)) pr_err("invalid ISA interrupt number %d\n", i); @@ -1599,7 +1542,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) lp->irq_map = ((irq_map_buff[0] >> 8) | (irq_map_buff[1] << 8)); } -#endif } #endif if (!dev->irq) @@ -1978,18 +1920,6 @@ static struct platform_driver cs89x0_driver = { .remove = cs89x0_platform_remove, }; -static int __init cs89x0_init(void) -{ - return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe); -} - -module_init(cs89x0_init); - -static void __exit cs89x0_cleanup(void) -{ - platform_driver_unregister(&cs89x0_driver); -} - -module_exit(cs89x0_cleanup); +module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); #endif /* CONFIG_CS89x0_PLATFORM */ diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 354cbb78ed50..67b0388b6e68 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = { }, }; -static int __init ep93xx_eth_init_module(void) -{ - printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n"); - return platform_driver_register(&ep93xx_eth_driver); -} - -static void __exit ep93xx_eth_cleanup_module(void) -{ - platform_driver_unregister(&ep93xx_eth_driver); -} +module_platform_driver(ep93xx_eth_driver); -module_init(ep93xx_eth_init_module); -module_exit(ep93xx_eth_cleanup_module); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-eth"); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9eada8e86078..9105465b2a1a 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1693,22 
+1693,7 @@ static struct platform_driver dm9000_driver = { .remove = dm9000_drv_remove, }; -static int __init -dm9000_init(void) -{ - printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION); - - return platform_driver_register(&dm9000_driver); -} - -static void __exit -dm9000_cleanup(void) -{ - platform_driver_unregister(&dm9000_driver); -} - -module_init(dm9000_init); -module_exit(dm9000_cleanup); +module_platform_driver(dm9000_driver); MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); MODULE_DESCRIPTION("Davicom DM9000 network driver"); diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index 88feced9a629..cdbcd1643141 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) private->rx_buffer = dma_alloc_coherent(d, 8192, &private->rx_dma_handle, GFP_KERNEL); - if (private->rx_buffer == NULL) { - pr_err("%s: no memory for rx buffer\n", __func__); + if (private->rx_buffer == NULL) goto rx_buf_fail; - } + private->tx_buffer = dma_alloc_coherent(d, 8192, &private->tx_dma_handle, GFP_KERNEL); - if (private->tx_buffer == NULL) { - pr_err("%s: no memory for tx buffer\n", __func__); + if (private->tx_buffer == NULL) goto tx_buf_fail; - } SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 110d26f4c602..afa8e3af2c4d 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -580,12 +580,9 @@ alloc_list (struct net_device *dev) skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; - if (skb == NULL) { - printk (KERN_ERR - "%s: alloc_list: allocate Rx buffer error! ", - dev->name); + if (skb == NULL) break; - } + /* Rubicon now supports 40 bits of addressing space. */ np->rx_ring[i].fraginfo = cpu_to_le64 ( pci_map_single ( diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 29aff55f2eea..2e2700e3a5ab 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 3c9b4f12e3e5..f286ad2da1ff 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, cmd.size = sizeof(struct be_cmd_req_set_mac_list); cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, GFP_KERNEL); - if (!cmd.va) { - dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + if (!cmd.va) return -ENOMEM; - } spin_lock_bh(&adapter->mcc_lock); @@ -3202,6 +3200,31 @@ err: return status; } +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_intr_set *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), + wrb, NULL); + + req->intr_enabled = intr_enable; + + status = be_mbox_notify_wait(adapter); + + mutex_unlock(&adapter->mbox_lock); + return status; +} + int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 96970860c915..f2af85517218 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -188,6 +188,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_BEACON_STATE 70 #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 #define OPCODE_COMMON_GET_PORT_NAME 77 +#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 #define OPCODE_COMMON_GET_PHY_DETAILS 102 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 @@ -1791,6 +1792,12 @@ struct be_cmd_enable_disable_vf { u8 rsvd[3]; }; +struct be_cmd_req_intr_set { + struct be_cmd_req_hdr hdr; + u8 intr_enabled; + u8 rsvd[3]; +}; + static inline bool check_privilege(struct be_adapter *adapter, u32 flags) { return flags & adapter->cmd_privileges ? true : false; @@ -1938,3 +1945,4 @@ extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, extern int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); +extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 76b302f30c87..07b7f27cb0b9 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
 * * This program is free software; you can redistribute it and/or @@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter) ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, &ddrdma_cmd.dma, GFP_KERNEL); - if (!ddrdma_cmd.va) { - dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); + if (!ddrdma_cmd.va) return -ENOMEM; - } for (i = 0; i < 2; i++) { ret = be_cmd_ddr_dma_test(adapter, pattern[i], @@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, &eeprom_cmd.dma, GFP_KERNEL); - if (!eeprom_cmd.va) { - dev_err(&adapter->pdev->dev, - "Memory allocation failure. Could not read eeprom\n"); + if (!eeprom_cmd.va) return -ENOMEM; - } status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 62dc220695f7..89e6d8cfaf0d 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288b..536afa2fb94c 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, q->entry_size = entry_size; mem->size = len * entry_size; mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!mem->va) return -ENOMEM; - memset(mem->va, 0, mem->size); return 0; } -static void be_intr_set(struct be_adapter *adapter, bool enable) +static void be_reg_intr_set(struct be_adapter *adapter, bool enable) { u32 reg, enabled; - if (adapter->eeh_error) - return; - pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, &reg); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; @@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable) PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); } +static void be_intr_set(struct be_adapter *adapter, bool enable) +{ + int status = 0; + + /* On lancer interrupts can't be controlled via this register */ + if (lancer_chip(adapter)) + return; + + if (adapter->eeh_error) + return; + + status = be_cmd_intr_set(adapter, enable); + if (status) + be_reg_intr_set(adapter, enable); +} + static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) { u32 val = 0; @@ -2435,9 +2447,6 @@ static int be_close(struct net_device *netdev) be_roce_dev_close(adapter); - if (!lancer_chip(adapter)) - be_intr_set(adapter, false); - for_all_evt_queues(adapter, eqo, i) napi_disable(&eqo->napi); @@ -2525,9 +2534,6 @@ static int be_open(struct net_device *netdev) be_irq_register(adapter); - if (!lancer_chip(adapter)) - be_intr_set(adapter, true); - for_all_rx_queues(adapter, rxo, i) be_cq_notify(adapter, rxo->cq.id, true, 0); @@ -2562,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) cmd.size = sizeof(struct 
be_cmd_req_acpi_wol_magic_config); cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (cmd.va == NULL) return -1; - memset(cmd.va, 0, cmd.size); if (enable) { status = pci_write_config_dword(adapter->pdev, @@ -3457,11 +3462,9 @@ static int lancer_fw_download(struct be_adapter *adapter, flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; - dev_err(&adapter->pdev->dev, - "Memory allocation failure while flashing\n"); goto lancer_fw_exit; } @@ -3563,8 +3566,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; - dev_err(&adapter->pdev->dev, - "Memory allocation failure while flashing\n"); goto be_fw_exit; } @@ -3791,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter) rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, - &rx_filter->dma, GFP_KERNEL); + &rx_filter->dma, + GFP_KERNEL | __GFP_ZERO); if (rx_filter->va == NULL) { status = -ENOMEM; goto free_mbox; } - memset(rx_filter->va, 0, rx_filter->size); + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@ -3838,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter) cmd->size = sizeof(struct be_cmd_req_get_stats_v1); cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (cmd->va == NULL) return -1; - memset(cmd->va, 0, cmd->size); return 0; } @@ -3853,6 +3854,7 @@ static void be_remove(struct pci_dev *pdev) return; be_roce_dev_remove(adapter); + be_intr_set(adapter, false); cancel_delayed_work_sync(&adapter->func_recovery_work); @@ -4142,11 +4144,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) goto ctrl_clean; } - /* The INTR bit may be set in the card when probed by a kdump kernel - * after a crash. - */ - if (!lancer_chip(adapter)) - be_intr_set(adapter, false); + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true); status = be_stats_init(adapter); if (status) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 55d32aa0a093..f3d126dcc104 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index db4ea8081c07..276572998463 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 7c361d1db94c..21b85fb7d05f 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv) priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftgmac100_descs), - &priv->descs_dma_addr, GFP_KERNEL); + &priv->descs_dma_addr, + GFP_KERNEL | __GFP_ZERO); if (!priv->descs) return -ENOMEM; - memset(priv->descs, 0, sizeof(struct ftgmac100_descs)); - /* initialize RX ring */ ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); @@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = { }, }; -/****************************************************************************** - * initialization / finalization - *****************************************************************************/ -static int __init ftgmac100_init(void) -{ - pr_info("Loading version " DRV_VERSION " ...\n"); - return platform_driver_register(&ftgmac100_driver); -} - -static void __exit ftgmac100_exit(void) -{ - platform_driver_unregister(&ftgmac100_driver); -} - -module_init(ftgmac100_init); -module_exit(ftgmac100_exit); +module_platform_driver(ftgmac100_driver); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); MODULE_DESCRIPTION("FTGMAC100 driver"); diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index b5ea8fbd8a76..a6eda8d83138 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv) { int i; - priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), - &priv->descs_dma_addr, GFP_KERNEL); + priv->descs = dma_alloc_coherent(priv->dev, + sizeof(struct ftmac100_descs), + &priv->descs_dma_addr, + GFP_KERNEL | __GFP_ZERO); if (!priv->descs) return -ENOMEM; - memset(priv->descs, 0, sizeof(struct ftmac100_descs)); - /* initialize RX ring */ ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index b7d58fe6f531..549ce13b92ac 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -2,7 +2,8 @@ # Makefile for the Freescale network device drivers. 
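# (Editorial aside, not part of the patch: the hunk below renames fec.c to
# fec_main.c and switches fec.o to a kbuild composite object; the objects
# listed in "fec-objs := fec_main.o fec_ptp.o" are linked together into the
# single fec.o that CONFIG_FEC builds.)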
# -obj-$(CONFIG_FEC) += fec.o fec_ptp.o +obj-$(CONFIG_FEC) += fec.o +fec-objs :=fec_main.o fec_ptp.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec_main.c index f292c3aa423f..a82a70345bbf 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -29,7 +29,6 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> @@ -791,8 +790,6 @@ fec_enet_rx(struct net_device *ndev, int budget) skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); if (unlikely(!skb)) { - printk("%s: Memory squeeze, dropping packet.\n", - ndev->name); ndev->stats.rx_dropped++; } else { skb_reserve(skb, NET_IP_ALIGN); @@ -1442,7 +1439,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = BD_ENET_TX_INT; } bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); @@ -1607,7 +1604,7 @@ fec_set_mac_address(struct net_device *ndev, void *p) * Polled functionality used by netconsole and others in non interrupt mode * */ -void fec_poll_controller(struct net_device *dev) +static void fec_poll_controller(struct net_device *dev) { int i; struct fec_enet_private *fep = netdev_priv(dev); @@ -1648,11 +1645,9 @@ static int fec_enet_init(struct net_device *ndev) /* Allocate memory for buffer descriptors. */ cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, - GFP_KERNEL); - if (!cbd_base) { - printk("FEC: allocate descriptor memory failed?\n"); + GFP_KERNEL); + if (!cbd_base) return -ENOMEM; - } memset(cbd_base, 0, PAGE_SIZE); spin_lock_init(&fep->hw_lock); @@ -1757,16 +1752,10 @@ fec_probe(struct platform_device *pdev) if (!r) return -ENXIO; - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (!r) - return -EBUSY; - /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); - if (!ndev) { - ret = -ENOMEM; - goto failed_alloc_etherdev; - } + if (!ndev) + return -ENOMEM; SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1778,7 +1767,7 @@ fec_probe(struct platform_device *pdev) (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; - fep->hwp = ioremap(r->start, resource_size(r)); + fep->hwp = devm_request_and_ioremap(&pdev->dev, r); fep->pdev = pdev; fep->dev_id = dev_id++; @@ -1900,11 +1889,8 @@ failed_regulator: clk_disable_unprepare(fep->clk_ptp); failed_pin: failed_clk: - iounmap(fep->hwp); failed_ioremap: free_netdev(ndev); -failed_alloc_etherdev: - release_mem_region(r->start, resource_size(r)); return ret; } @@ -1914,7 +1900,6 @@ fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - struct resource *r; int i; unregister_netdev(ndev); @@ -1930,13 +1915,8 @@ fec_drv_remove(struct platform_device *pdev) if (irq > 0) free_irq(irq, ndev); } - iounmap(fep->hwp); free_netdev(ndev); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - BUG_ON(!r); - release_mem_region(r->start, resource_size(r)); - platform_set_drvdata(pdev, NULL); return 0; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 0d8df400a479..1f17ca0f2201 100644 --- 
a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -128,7 +128,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) spin_unlock_irqrestore(&fep->tmreg_lock, flags); } -EXPORT_SYMBOL(fec_ptp_start_cyclecounter); /** * fec_ptp_adjfreq - adjust ptp cycle frequency @@ -319,7 +318,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } -EXPORT_SYMBOL(fec_ptp_ioctl); /** * fec_time_keep - call timecounter_read every second to avoid timer overrun @@ -385,4 +383,3 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) pr_info("registered PHC device on %s\n", ndev->name); } } -EXPORT_SYMBOL(fec_ptp_init); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 46df28893c10..edc120094c34 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) received++; netif_receive_skb(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev) received++; netif_rx(skb); } else { - dev_warn(fep->dev, - "Memory squeeze, dropping packet.\n"); fep->stats.rx_dropped++; skbn = skb; } @@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev) */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); - if (skb == NULL) { - dev_warn(fep->dev, - "Memory squeeze, unable to allocate skb\n"); + if (skb == NULL) break; - } + skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; CBDW_BUFADDR(bdp, @@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, /* Alloc new skb */ new_skb = netdev_alloc_skb(dev, skb->len + 4); - if (!new_skb) { - if (net_ratelimit()) { - dev_warn(fep->dev, - "Memory squeeze, dropping tx packet.\n"); - } + if (!new_skb) return NULL; - } /* Make sure new skb is properly aligned */ skb_align(new_skb, 4); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d2c5441d1bf0..96fbe3548243 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -132,7 +132,7 @@ static int gfar_poll(struct napi_struct *napi, int budget); static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); -static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi); void gfar_halt(struct net_device *dev); @@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) /* Allocate memory for the buffer descriptors */ vaddr = dma_alloc_coherent(dev, - sizeof(struct txbd8) * priv->total_tx_ring_size + - sizeof(struct rxbd8) * priv->total_rx_ring_size, - &addr, GFP_KERNEL); - if (!vaddr) { - netif_err(priv, ifup, ndev, - "Could not allocate buffer descriptors!\n"); + (priv->total_tx_ring_size * + sizeof(struct txbd8)) + + (priv->total_rx_ring_size * + sizeof(struct rxbd8)), + &addr, GFP_KERNEL); + if (!vaddr) return -ENOMEM; - } for (i = 0; i < 
priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -342,7 +341,7 @@ static void gfar_init_mac(struct net_device *ndev) gfar_init_tx_rx_base(priv); /* Configure the coalescing support */ - gfar_configure_coalescing(priv, 0xFF, 0xFF); + gfar_configure_coalescing_all(priv); /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; @@ -691,7 +690,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) } for (i = 0; i < priv->num_tx_queues; i++) - priv->tx_queue[i] = NULL; + priv->tx_queue[i] = NULL; for (i = 0; i < priv->num_rx_queues; i++) priv->rx_queue[i] = NULL; @@ -1817,25 +1816,15 @@ void gfar_start(struct net_device *dev) dev->trans_start = jiffies; /* prevent tx timeout */ } -void gfar_configure_coalescing(struct gfar_private *priv, +static void gfar_configure_coalescing(struct gfar_private *priv, unsigned long tx_mask, unsigned long rx_mask) { struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 __iomem *baddr; - int i = 0; - - /* Backward compatible case ---- even if we enable - * multiple queues, there's only single reg to program - */ - gfar_write(&regs->txic, 0); - if (likely(priv->tx_queue[0]->txcoalescing)) - gfar_write(&regs->txic, priv->tx_queue[0]->txic); - - gfar_write(&regs->rxic, 0); - if (unlikely(priv->rx_queue[0]->rxcoalescing)) - gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); if (priv->mode == MQ_MG_MODE) { + int i = 0; + baddr = &regs->txic0; for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { gfar_write(baddr + i, 0); @@ -1849,9 +1838,25 @@ void gfar_configure_coalescing(struct gfar_private *priv, if (likely(priv->rx_queue[i]->rxcoalescing)) gfar_write(baddr + i, priv->rx_queue[i]->rxic); } + } else { + /* Backward compatible case -- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(&regs->txic, 0); + if (likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(&regs->txic, priv->tx_queue[0]->txic); + + gfar_write(&regs->rxic, 0); + if (unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(&regs->rxic, priv->rx_queue[0]->rxic); } } +void gfar_configure_coalescing_all(struct gfar_private *priv) +{ + gfar_configure_coalescing(priv, 0xFF, 0xFF); +} + static int register_grp_irqs(struct gfar_priv_grp *grp) { struct gfar_private *priv = grp->priv; @@ -1941,7 +1946,7 @@ int startup_gfar(struct net_device *ndev) phy_start(priv->phydev); - gfar_configure_coalescing(priv, 0xFF, 0xFF); + gfar_configure_coalescing_all(priv); return 0; @@ -2469,12 +2474,11 @@ static void gfar_align_skb(struct sk_buff *skb) } /* Interrupt Handler for Transmit complete */ -static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { struct net_device *dev = tx_queue->dev; struct netdev_queue *txq; struct gfar_private *priv = netdev_priv(dev); - struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *bdp, *next = NULL; struct txbd8 *lbdp = NULL; struct txbd8 *base = tx_queue->tx_bd_base; @@ -2489,7 +2493,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) u32 lstatus; size_t buflen; - rx_queue = priv->rx_queue[tqi]; txq = netdev_get_tx_queue(dev, tqi); bdp = tx_queue->dirty_tx; skb_dirtytx = tx_queue->skb_dirtytx; @@ -2571,8 +2574,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) tx_queue->dirty_tx = bdp; netdev_tx_completed_queue(txq, howmany, bytes_sent); - - return howmany; } static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) @@ -2694,8 +2695,6 @@ static void gfar_process_frame(struct net_device 
*dev, struct sk_buff *skb, struct gfar_private *priv = netdev_priv(dev); struct rxfcb *fcb = NULL; - gro_result_t ret; - /* fcb is at the beginning if exists */ fcb = (struct rxfcb *)skb->data; @@ -2734,10 +2733,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, __vlan_hwaccel_put_tag(skb, fcb->vlctl); /* Send the packet up the stack */ - ret = napi_gro_receive(napi, skb); + napi_gro_receive(napi, skb); - if (unlikely(GRO_DROP == ret)) - atomic64_inc(&priv->extra_stats.kernel_dropped); } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2835,62 +2832,82 @@ static int gfar_poll(struct napi_struct *napi, int budget) struct gfar __iomem *regs = gfargrp->regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; - int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; - int tx_cleaned = 0, i, left_over_budget = budget; - unsigned long serviced_queues = 0; - int num_queues = 0; - - num_queues = gfargrp->num_rx_queues; - budget_per_queue = budget/num_queues; + int work_done = 0, work_done_per_q = 0; + int i, budget_per_q = 0; + int has_tx_work; + unsigned long rstat_rxf; + int num_act_queues; /* Clear IEVENT, so interrupts aren't called again * because of the packets that have already arrived */ gfar_write(&regs->ievent, IEVENT_RTX_MASK); - while (num_queues && left_over_budget) { - budget_per_queue = left_over_budget/num_queues; - left_over_budget = 0; + rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; + + num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); + if (num_act_queues) + budget_per_q = budget/num_act_queues; + + while (1) { + has_tx_work = 0; + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { + gfar_clean_tx_ring(tx_queue); + has_tx_work = 1; + } + } for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - if (test_bit(i, &serviced_queues)) + /* skip queue if not active */ + if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) continue; + rx_queue = priv->rx_queue[i]; - tx_queue = priv->tx_queue[rx_queue->qindex]; - - tx_cleaned += gfar_clean_tx_ring(tx_queue); - rx_cleaned_per_queue = - gfar_clean_rx_ring(rx_queue, budget_per_queue); - rx_cleaned += rx_cleaned_per_queue; - if (rx_cleaned_per_queue < budget_per_queue) { - left_over_budget = left_over_budget + - (budget_per_queue - - rx_cleaned_per_queue); - set_bit(i, &serviced_queues); - num_queues--; + work_done_per_q = + gfar_clean_rx_ring(rx_queue, budget_per_q); + work_done += work_done_per_q; + + /* finished processing this queue */ + if (work_done_per_q < budget_per_q) { + /* clear active queue hw indication */ + gfar_write(&regs->rstat, + RSTAT_CLEAR_RXF0 >> i); + rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i); + num_act_queues--; + + if (!num_act_queues) + break; + /* recompute budget per Rx queue */ + budget_per_q = + (budget - work_done) / num_act_queues; } } - } - if (tx_cleaned) - return budget; + if (work_done >= budget) + break; - if (rx_cleaned < budget) { - napi_complete(napi); + if (!num_act_queues && !has_tx_work) { - /* Clear the halt bit in RSTAT */ - gfar_write(&regs->rstat, gfargrp->rstat); + napi_complete(napi); - gfar_write(&regs->imask, IMASK_DEFAULT); + /* Clear the halt bit in RSTAT */ + gfar_write(&regs->rstat, gfargrp->rstat); - /* If we are coalescing interrupts, update the timer - * Otherwise, clear it - */ - gfar_configure_coalescing(priv, gfargrp->rx_bit_map, - gfargrp->tx_bit_map); + 
gfar_write(&regs->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer + * Otherwise, clear it + */ + gfar_configure_coalescing(priv, gfargrp->rx_bit_map, + gfargrp->tx_bit_map); + break; + } } - return rx_cleaned; + return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 63a28d294e20..04b552cd419d 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -291,7 +291,9 @@ extern const char gfar_driver_version[]; #define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) -#define RSTAT_CLEAR_RHALT 0x00800000 +#define RSTAT_CLEAR_RHALT 0x00800000 +#define RSTAT_CLEAR_RXF0 0x00000080 +#define RSTAT_RXF_MASK 0x000000ff #define TCTRL_IPCSEN 0x00004000 #define TCTRL_TUCSEN 0x00002000 @@ -627,7 +629,6 @@ struct rmon_mib }; struct gfar_extra_stats { - atomic64_t kernel_dropped; atomic64_t rx_large; atomic64_t rx_short; atomic64_t rx_nonoctet; @@ -1180,8 +1181,7 @@ extern void stop_gfar(struct net_device *dev); extern void gfar_halt(struct net_device *dev); extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, u32 regnum, u32 read); -extern void gfar_configure_coalescing(struct gfar_private *priv, - unsigned long tx_mask, unsigned long rx_mask); +extern void gfar_configure_coalescing_all(struct gfar_private *priv); void gfar_init_sysfs(struct net_device *dev); int gfar_set_features(struct net_device *dev, netdev_features_t features); extern void gfar_check_rx_parser_mode(struct gfar_private *priv); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 75e89acf4912..4e7118f9f075 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -66,7 +66,6 @@ static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { - "rx-dropped-by-kernel", "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -436,7 +435,7 @@ static int gfar_scoalesce(struct net_device *dev, gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); } - gfar_configure_coalescing(priv, 0xFF, 0xFF); + gfar_configure_coalescing_all(priv); return 0; } diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 2418faf2251a..84125707f321 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -1003,8 +1003,6 @@ static void fjn_rx(struct net_device *dev) } skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n", - pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_dropped++; break; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 1c54e229e3cc..e38816145395 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev) #ifdef __mc68000__ cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); #endif - } - else + } else { skb = netdev_alloc_skb(dev, pkt_len + 2); + } memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! 
*/ - printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; - } - else { + } else { if (!rx_in_place) { /* 16 byte align the data fields */ skb_reserve(skb, 2); diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f045ea4dc514..d653bac4cfc4 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev) rbd->v_data = newskb->data; rbd->b_data = SWAP32(dma_addr); DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); - } else + } else { skb = netdev_alloc_skb_ip_align(dev, pkt_len); + } memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! */ - printk(KERN_ERR - "%s: i596_rx Memory squeeze, dropping packet.\n", - dev->name); dev->stats.rx_dropped++; } else { if (!rx_in_place) { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 328f47c92e26..029633434474 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { - netdev_info(dev, "Unable to allocate enough skb in the array\n"); pr->rq1_skba.os_skbs = fill_wqes - i; break; } @@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) for (i = 0; i < nr_rq1a; i++) { skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb_arr_rq1[i]) { - netdev_info(dev, "Not enough memory to allocate skb array\n"); + if (!skb_arr_rq1[i]) break; - } } /* Ring doorbell */ ehea_update_rq1a(pr->qp, i - 1); @@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev, skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb) { - netdev_err(dev, "Not enough memory to allocate skb\n"); + if (!skb) break; - } } skb_copy_to_linear_data(skb, ((char *)cqe) + 64, cqe->num_bytes_transfered - 4); diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 1f7ecf57181e..610ed223d1db 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev) bd_size = sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + NUM_RX_BUFF * mal->num_rx_chans); - mal->bd_virt = - dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, - GFP_KERNEL); + mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL | __GFP_ZERO); if (mal->bd_virt == NULL) { - printk(KERN_ERR - "mal%d: out of memory allocating RX/TX descriptors!\n", - index); err = -ENOMEM; goto fail_unmap; } - memset(mal->bd_virt, 0, bd_size); for (i = 0; i < mal->num_tx_chans; ++i) set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c859771a9902..302d59401065 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev) adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; adapter->rx_queue.queue_addr = - dma_alloc_coherent(dev, adapter->rx_queue.queue_len, - &adapter->rx_queue.queue_dma, GFP_KERNEL); - + dma_alloc_coherent(dev, adapter->rx_queue.queue_len, + &adapter->rx_queue.queue_dma, GFP_KERNEL); if 
(!adapter->rx_queue.queue_addr) { - netdev_err(netdev, "unable to allocate rx queue pages\n"); rc = -ENOMEM; goto err_out; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index ffd287196bf8..82a967c95598 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) txdr->size = txdr->count * sizeof(struct e1000_tx_desc); txdr->size = ALIGN(txdr->size, 4096); txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!txdr->desc) { ret_val = 2; goto err_nomem; } - memset(txdr->desc, 0, txdr->size); txdr->next_to_use = txdr->next_to_clean = 0; ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); @@ -1079,12 +1078,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!rxdr->desc) { ret_val = 6; goto err_nomem; } - memset(rxdr->desc, 0, rxdr->size); rxdr->next_to_use = rxdr->next_to_clean = 0; rctl = er32(RCTL); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 8502c625dbef..d98e1d0996d4 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, if (!txdr->desc) { setup_tx_desc_die: vfree(txdr->buffer_info); - e_err(probe, "Unable to allocate memory for the Tx descriptor " - "ring\n"); return -ENOMEM; } @@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, GFP_KERNEL); - if (!rxdr->desc) { - e_err(probe, "Unable to allocate memory for the Rx descriptor " - "ring\n"); setup_rx_desc_die: vfree(rxdr->buffer_info); return -ENOMEM; @@ -1729,8 +1724,6 @@ setup_rx_desc_die: if (!rxdr->desc) { dma_free_coherent(&pdev->dev, rxdr->size, olddesc, olddma); - e_err(probe, "Unable to allocate memory for the Rx " - "descriptor ring\n"); goto setup_rx_desc_die; } diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index e0991388664c..b71c8502a2b3 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -37,7 +37,9 @@ * "index + 5". */ static const u16 e1000_gg82563_cable_length_table[] = { - 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF +}; + #define GG82563_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_gg82563_cable_length_table) @@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. @@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ - udelay(200); + usleep_range(200, 400); /* ...and verify the command was successful. 
*/ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); @@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return -E1000_ERR_PHY; } - udelay(200); + usleep_range(200, 400); ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & offset, + data); - udelay(200); + usleep_range(200, 400); } else { ret_val = e1000e_read_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & offset, + data); } e1000_release_phy_80003es2lan(hw); @@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, * before the device has completed the "Page Select" MDI * transaction. So we wait 200us after each MDI command... */ - udelay(200); + usleep_range(200, 400); /* ...and verify the command was successful. */ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); @@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return -E1000_ERR_PHY; } - udelay(200); + usleep_range(200, 400); ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & + offset, data); - udelay(200); + usleep_range(200, 400); } else { ret_val = e1000e_write_phy_reg_mdic(hw, - MAX_PHY_REG_ADDRESS & offset, - data); + MAX_PHY_REG_ADDRESS & + offset, data); } e1000_release_phy_80003es2lan(hw); @@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; @@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; } @@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, s32 ret_val; if (hw->phy.media_type == e1000_media_type_copper) { - ret_val = e1000e_get_speed_and_duplex_copper(hw, - speed, - duplex); + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); hw->phy.ops.cfg_on_link_up(hw); } else { ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, - speed, - duplex); + speed, + duplex); } return ret_val; @@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); @@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. 
*/ reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); /* Enable retransmit on late collisions */ @@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) /* default to true to enable the MDIC W/A */ hw->dev_spec.e80003es2lan.mdic_wa_enable = true; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET >> - E1000_KMRNCTRLSTA_OFFSET_SHIFT, - &i); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); if (!ret_val) { if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == - E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) hw->dev_spec.e80003es2lan.mdic_wa_enable = false; } @@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; - u32 ctrl_ext; + u32 reg; u16 data; ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); @@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) } /* Bypass Rx and Tx FIFO's */ - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, - E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | - E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - &data); + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); if (ret_val) return ret_val; data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, - data); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); if (ret_val) return ret_val; @@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - ctrl_ext = er32(CTRL_EXT); - ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); - ew32(CTRL_EXT, ctrl_ext); + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + ew32(CTRL_EXT, reg); ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); if (ret_val) @@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) * polling the phy; this fixes erroneous timeouts at 10Mbps. 
*/ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), - 0xFFFF); + 0xFFFF); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - &reg_data); + &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), - reg_data); + reg_data); if (ret_val) return ret_val; - ret_val = e1000_read_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - &reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + &reg_data); if (ret_val) return ret_val; reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw) if (hw->phy.media_type == e1000_media_type_copper) { ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, - &duplex); + &duplex); if (ret_val) return ret_val; @@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) u16 reg_data, reg_data2; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) u32 i = 0; reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; - ret_val = e1000_write_kmrn_reg_80003es2lan(hw, - E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, - reg_data); + ret_val = + e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); if (ret_val) return ret_val; @@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, return ret_val; kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & - E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); @@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = { .phy_ops = &es2_phy_ops, .nvm_ops = &es2_nvm_ops, }; - diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 2faffbde179e..7380442a3829 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) default: nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> - E1000_EECD_SIZE_EX_SHIFT); + E1000_EECD_SIZE_EX_SHIFT); /* Added to a constant, "size" becomes the left-shift value * for setting word_size. 
*/ @@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) return ret_val; phy->id = (u32)(phy_id << 16); - udelay(20); + usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) if (!(swsm & E1000_SWSM_SMBI)) break; - udelay(50); + usleep_range(50, 100); i++; } @@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) if (er32(SWSM) & E1000_SWSM_SWESMBI) break; - udelay(50); + usleep_range(50, 100); } if (i == fw_timeout) { @@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); ew32(SWSM, swsm); } + /** * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore * @hw: pointer to the HW structure @@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, } for (i = 0; i < words; i++) { - eewr = (data[i] << E1000_NVM_RW_REG_DATA) | - ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | - E1000_NVM_RW_REG_START; + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); if (ret_val) @@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) s32 timeout = PHY_CFG_TIMEOUT; while (timeout) { - if (er32(EEMNGCTL) & - E1000_NVM_CFG_DONE_PORT_0) + if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) break; usleep_range(1000, 2000); timeout--; @@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) } if (hw->nvm.type == e1000_nvm_flash_hw) { - udelay(10); + usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); @@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Disabling VLAN filtering */ e_dbg("Initializing the IEEE VLAN\n"); @@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(0), reg_data); /* ...for both queues. 
*/ @@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) break; default: reg_data = er32(TXDCTL(1)); - reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB | - E1000_TXDCTL_COUNT_DESC; + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); ew32(TXDCTL(1), reg_data); break; } @@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) status = er32(STATUS); er32(RXCW); /* SYNCH bit and IV bit are sticky */ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { @@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * the IV bit and restart Autoneg */ for (i = 0; i < AN_RETRY_COUNT; i++) { - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && (rxcw & E1000_RXCW_C)) @@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = { .phy_ops = &e82_phy_ops_bm, .nvm_ops = &e82571_nvm_ops, }; - diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 85cb1a3b7cd4..08e24dc3dc0e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -44,6 +44,8 @@ #define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAC_MASK_82574 0x01F00000 +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + /* Manageability Operation Mode mask */ #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index fc3a4fe1ac71..351c94a0cf74 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -66,7 +66,7 @@ #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ -#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_LSECCK 0x00001000 #define E1000_CTRL_EXT_PHYPDEN 0x00100000 @@ -216,6 +216,8 @@ #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ @@ -234,17 +236,17 @@ #define E1000_STATUS_FUNC_SHIFT 2 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ -#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 - #define ADVERTISE_10_HALF 0x0001 #define ADVERTISE_10_FULL 0x0002 #define ADVERTISE_100_HALF 0x0004 @@ -311,6 +313,7 @@ /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 /* Receive Checksum Control */ #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ @@ -400,7 +403,8 @@ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ -#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ @@ -583,13 +587,13 @@ #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ #define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) -#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ -#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ -#define E1000_NVM_RW_REG_START 1 /* Start operation */ -#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ -#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ -#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ -#define E1000_FLASH_UPDATES 2000 +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ +#define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ #define NVM_COMPAT 0x0003 @@ -785,6 +789,7 @@ GG82563_REG(194, 18) /* Inband Control */ /* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 #define E1000_MDIC_REG_SHIFT 16 #define E1000_MDIC_PHY_SHIFT 21 #define E1000_MDIC_OP_WRITE 0x04000000 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index fcc758138b8a..82f1c84282db 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -46,6 +46,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/ptp_classify.h> #include <linux/mii.h> +#include <linux/mdio.h> #include "hw.h" struct e1000_info; @@ -61,7 +62,6 @@ struct e1000_info; #define e_notice(format, arg...) 
\ netdev_notice(adapter->netdev, format, ## arg) - /* Interrupt modes, as used by the IntMode parameter */ #define E1000E_INT_MODE_LEGACY 0 #define E1000E_INT_MODE_MSI 1 @@ -239,9 +239,8 @@ struct e1000_adapter { u16 tx_itr; u16 rx_itr; - /* Tx */ - struct e1000_ring *tx_ring /* One per active queue */ - ____cacheline_aligned_in_smp; + /* Tx - one ring per active queue */ + struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; u32 tx_fifo_limit; struct napi_struct napi; @@ -352,6 +351,8 @@ struct e1000_adapter { struct timecounter tc; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; + + u16 eee_advert; }; struct e1000_info { @@ -487,8 +488,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring); extern void e1000e_free_rx_resources(struct e1000_ring *ring); extern void e1000e_free_tx_resources(struct e1000_ring *ring); extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 - *stats); + struct rtnl_link_stats64 + *stats); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter); @@ -558,12 +559,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) return hw->nvm.ops.update(hw); } -static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { return hw->nvm.ops.read(hw, offset, words, data); } -static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) { return hw->nvm.ops.write(hw, offset, words, data); } @@ -597,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw) s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) - udelay(50); + usleep_range(50, 100); return i; } diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index f91a8f3f9d48..7c8ca658d553 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -35,12 +35,11 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/vmalloc.h> -#include <linux/mdio.h> #include <linux/pm_runtime.h> #include "e1000.h" -enum {NETDEV_STATS, E1000_STATS}; +enum { NETDEV_STATS, E1000_STATS }; struct e1000_stats { char stat_string[ETH_GSTRING_LEN]; @@ -121,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; + #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) static int e1000_get_settings(struct net_device *netdev, @@ -197,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev, /* MDI-X => 2; MDI =>1; Invalid =>0 */ if ((hw->phy.media_type == e1000_media_type_copper) && netif_carrier_ok(netdev)) - ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : - ETH_TP_MDI; + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; else ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; @@ -224,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) /* Fiber NICs only allow 1000 gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && - spd != SPEED_1000 && - dplx != DUPLEX_FULL) { + (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { goto err_inval; } @@ -298,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev, hw->mac.autoneg = 1; if (hw->phy.media_type == e1000_media_type_fiber) hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE | - ADVERTISED_Autoneg; + ADVERTISED_FIBRE | ADVERTISED_Autoneg; else hw->phy.autoneg_advertised = ecmd->advertising | - ADVERTISED_TP | - ADVERTISED_Autoneg; + ADVERTISED_TP | ADVERTISED_Autoneg; ecmd->advertising = hw->phy.autoneg_advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = e1000_fc_default; @@ -346,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; pause->autoneg = - (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); if (hw->fc.current_mode == e1000_fc_rx_pause) { pause->rx_pause = 1; @@ -435,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev, memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (adapter->pdev->revision << 16) | - adapter->pdev->device; + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); @@ -503,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev, first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(sizeof(u16) * - (last_word - first_word + 1), GFP_KERNEL); + eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; @@ -515,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev, } else { for (i = 0; i < last_word - first_word + 1; i++) { ret_val = e1000_read_nvm(hw, first_word + i, 1, - &eeprom_buff[i]); + &eeprom_buff[i]); if (ret_val) break; } @@ -553,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev, if (eeprom->len == 0) return -EOPNOTSUPP; - if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) + if (eeprom->magic != + (adapter->pdev->vendor | (adapter->pdev->device << 16))) return -EFAULT; if (adapter->flags & FLAG_READ_ONLY_NVM) @@ -579,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev, /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ ret_val = e1000_read_nvm(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); + &eeprom_buff[last_word - first_word]); if (ret_val) goto out; @@ -618,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); - strlcpy(drvinfo->driver, e1000e_driver_name, - sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, e1000e_driver_version, sizeof(drvinfo->version)); @@ -627,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev, * PCI-E controllers */ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d-%d", - (adapter->eeprom_vers & 0xF000) >> 12, - (adapter->eeprom_vers & 0x0FF0) >> 4, - (adapter->eeprom_vers & 0x000F)); + "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 
0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -756,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, { u32 pat, val; static const u32 test[] = { - 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; for (pat = 0; pat < ARRAY_SIZE(test); pat++) { E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, (test[pat] & write)); @@ -786,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, } return 0; } + #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ do { \ if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ @@ -813,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) u32 wlock_mac = 0; /* The status register is Read Only, so a write should fail. - * Some bits that get toggled are ignored. + * Some bits that get toggled are ignored. There are several bits + * on newer hardware that are r/w. */ switch (mac->type) { - /* there are several bits on newer hardware that are r/w */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: toggle = 0x7FFFF3FF; break; - default: + default: toggle = 0x7FFFF033; break; } @@ -928,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) } /* If Checksum is not Correct return error else test passed */ - if ((checksum != (u16) NVM_SUM) && !(*data)) + if ((checksum != (u16)NVM_SUM) && !(*data)) *data = 2; return *data; @@ -936,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) { - struct net_device *netdev = (struct net_device *) data; + struct net_device *netdev = (struct net_device *)data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -969,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, netdev)) { shared_int = 0; - } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, - netdev->name, netdev)) { + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, + netdev)) { *data = 1; ret_val = -1; goto out; @@ -1080,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) struct e1000_ring *tx_ring = &adapter->test_tx_ring; struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; int i; if (tx_ring->desc && tx_ring->buffer_info) { for (i = 0; i < tx_ring->count; i++) { - if (tx_ring->buffer_info[i].dma) + buffer_info = &tx_ring->buffer_info[i]; + + if (buffer_info->dma) dma_unmap_single(&pdev->dev, - tx_ring->buffer_info[i].dma, - tx_ring->buffer_info[i].length, - DMA_TO_DEVICE); - if (tx_ring->buffer_info[i].skb) - dev_kfree_skb(tx_ring->buffer_info[i].skb); + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); } } if (rx_ring->desc && rx_ring->buffer_info) { for (i = 0; i < rx_ring->count; i++) { - if (rx_ring->buffer_info[i].dma) + buffer_info = &rx_ring->buffer_info[i]; + + if (buffer_info->dma) dma_unmap_single(&pdev->dev, - rx_ring->buffer_info[i].dma, - 2048, DMA_FROM_DEVICE); - if (rx_ring->buffer_info[i].skb) - dev_kfree_skb(rx_ring->buffer_info[i].skb); + buffer_info->dma, + 2048, DMA_FROM_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); } } 
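The reg_pattern_test() helper above captures a reusable MMIO self-test idiom: write each test pattern through a write mask, read the register back, and require that every bit covered by the read mask survived the round trip. Below is a minimal, self-contained C sketch of that idiom; the fake_reg accessors and IMPL_MASK are illustrative stand-ins for the driver's E1000_WRITE_REG_ARRAY()/E1000_READ_REG_ARRAY() and its per-register masks, and are not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a device register whose reserved/read-only bits read
 * back as zero; IMPL_MASK plays the role of the per-register mask. */
#define IMPL_MASK 0x7FFFF033u

static uint32_t fake_reg;

static void reg_write(uint32_t val) { fake_reg = val & IMPL_MASK; }
static uint32_t reg_read(void) { return fake_reg; }

/* Same check as reg_pattern_test(): the value read back must equal the
 * pattern filtered through both the write mask and the read mask. */
static int pattern_test(uint32_t mask, uint32_t write)
{
	static const uint32_t test[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	size_t pat;

	for (pat = 0; pat < sizeof(test) / sizeof(test[0]); pat++) {
		uint32_t val;

		reg_write(test[pat] & write);
		val = reg_read();
		if (val != (test[pat] & write & mask)) {
			fprintf(stderr,
				"pattern test failed: got 0x%08X, expected 0x%08X\n",
				val, test[pat] & write & mask);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* 0x7FFFF033 is the default toggle mask used by e1000_reg_test() */
	return pattern_test(0x7FFFF033, 0xFFFFFFFF);
}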
@@ -1138,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->count = E1000_DEFAULT_TXD; tx_ring->buffer_info = kcalloc(tx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); + sizeof(struct e1000_buffer), GFP_KERNEL); if (!tx_ring->buffer_info) { ret_val = 1; goto err_nomem; @@ -1156,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); - ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); + ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); ew32(TDH(0), 0); ew32(TDT(0), 0); @@ -1179,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[i].length = skb->len; tx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, tx_ring->buffer_info[i].dma)) { ret_val = 4; @@ -1200,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->count = E1000_DEFAULT_RXD; rx_ring->buffer_info = kcalloc(rx_ring->count, - sizeof(struct e1000_buffer), - GFP_KERNEL); + sizeof(struct e1000_buffer), GFP_KERNEL); if (!rx_ring->buffer_info) { ret_val = 5; goto err_nomem; @@ -1220,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) ew32(RCTL, rctl & ~E1000_RCTL_EN); - ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF)); - ew32(RDBAH(0), ((u64) rx_ring->dma >> 32)); + ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); ew32(RDLEN(0), rx_ring->size); ew32(RDH(0), 0); ew32(RDT(0), 0); rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | - E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | - E1000_RCTL_SBP | E1000_RCTL_SECRC | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); ew32(RCTL, rctl); for (i = 0; i < rx_ring->count; i++) { @@ -1244,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) skb_reserve(skb, NET_IP_ALIGN); rx_ring->buffer_info[i].skb = skb; rx_ring->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, 2048, - DMA_FROM_DEVICE); + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, rx_ring->buffer_info[i].dma)) { ret_val = 8; @@ -1296,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) ew32(CTRL, ctrl_reg); e1e_flush(); - udelay(500); + usleep_range(500, 1000); return 0; } @@ -1322,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) e1e_wphy(hw, PHY_REG(2, 21), phy_reg); /* Assert SW reset for above settings to take effect */ hw->phy.ops.commit(hw); - mdelay(1); + usleep_range(1000, 2000); /* Force Full Duplex */ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); @@ -1363,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) /* force 1000, set loopback */ e1e_wphy(hw, MII_BMCR, 
0x4140); - mdelay(250); + msleep(250); /* Now set up the MAC to the same speed/duplex as the PHY. */ ctrl_reg = er32(CTRL); @@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) if (hw->phy.type == e1000_phy_m88) e1000_phy_disable_receiver(adapter); - udelay(500); + usleep_range(500, 1000); return 0; } @@ -1431,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) /* special write to serdes control register to enable SerDes analog * loopback */ -#define E1000_SERDES_LB_ON 0x410 - ew32(SCTL, E1000_SERDES_LB_ON); + ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 20000); @@ -1526,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_82572: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { -#define E1000_SERDES_LB_OFF 0x400 - ew32(SCTL, E1000_SERDES_LB_OFF); + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); e1e_flush(); usleep_range(10000, 20000); break; @@ -1564,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb, frame_size &= ~1; if (*(skb->data + 3) == 0xFF) if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) + (*(skb->data + frame_size / 2 + 12) == 0xAF)) return 0; return 13; } @@ -1575,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) struct e1000_ring *rx_ring = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; int i, j, k, l; int lc; int good_cnt; @@ -1595,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) k = 0; l = 0; - for (j = 0; j <= lc; j++) { /* loop count loop */ - for (i = 0; i < 64; i++) { /* send the packets */ - e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, - 1024); + /* loop count loop */ + for (j = 0; j <= lc; j++) { + /* send the packets */ + for (i = 0; i < 64; i++) { + buffer_info = &tx_ring->buffer_info[k]; + + e1000_create_lbtest_frame(buffer_info->skb, 1024); dma_sync_single_for_device(&pdev->dev, - tx_ring->buffer_info[k].dma, - tx_ring->buffer_info[k].length, - DMA_TO_DEVICE); + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); k++; if (k == tx_ring->count) k = 0; @@ -1612,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) msleep(200); time = jiffies; /* set the start time for the receive */ good_cnt = 0; - do { /* receive the sent packets */ + /* receive the sent packets */ + do { + buffer_info = &rx_ring->buffer_info[l]; + dma_sync_single_for_cpu(&pdev->dev, - rx_ring->buffer_info[l].dma, 2048, - DMA_FROM_DEVICE); + buffer_info->dma, 2048, + DMA_FROM_DEVICE); - ret_val = e1000_check_lbtest_frame( - rx_ring->buffer_info[l].skb, 1024); + ret_val = e1000_check_lbtest_frame(buffer_info->skb, + 1024); if (!ret_val) good_cnt++; l++; @@ -1637,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ret_val = 14; /* error code for time out error */ break; } - } /* end loop count loop */ + } return ret_val; } @@ -1696,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) /* On some Phy/switch combinations, link establishment * can take a few seconds more than expected. 
*/ - msleep(5000); + msleep_interruptible(5000); if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; @@ -1980,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: - p = (char *) &net_stats + - e1000_gstrings_stats[i].stat_offset; + p = (char *)&net_stats + + e1000_gstrings_stats[i].stat_offset; break; case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; + p = (char *)adapter + + e1000_gstrings_stats[i].stat_offset; break; default: data[i] = 0; @@ -1993,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, } data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } @@ -2069,23 +2075,20 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl; - u32 status, ret_val; + u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; + u32 ret_val; - if (!(adapter->flags & FLAG_IS_ICH) || - !(adapter->flags2 & FLAG2_HAS_EEE)) + if (!(adapter->flags2 & FLAG2_HAS_EEE)) return -EOPNOTSUPP; switch (hw->phy.type) { case e1000_phy_82579: cap_addr = I82579_EEE_CAPABILITY; - adv_addr = I82579_EEE_ADVERTISEMENT; lpa_addr = I82579_EEE_LP_ABILITY; pcs_stat_addr = I82579_EEE_PCS_STATUS; break; case e1000_phy_i217: cap_addr = I217_EEE_CAPABILITY; - adv_addr = I217_EEE_ADVERTISEMENT; lpa_addr = I217_EEE_LP_ABILITY; pcs_stat_addr = I217_EEE_PCS_STATUS; break; @@ -2104,10 +2107,7 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); /* EEE Advertised */ - ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data); - if (ret_val) - goto release; - edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); /* EEE Link Partner Advertised */ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); @@ -2125,25 +2125,11 @@ release: if (ret_val) return -ENODATA; - e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl); - status = er32(STATUS); - /* Result of the EEE auto negotiation - there is no register that * has the status of the EEE negotiation so do a best-guess based - * on whether both Tx and Rx LPI indications have been received or - * base it on the link speed, the EEE advertised speeds on both ends - * and the speeds on which EEE is enabled locally. + * on whether Tx or Rx LPI indications have been received. 
*/ - if (((phy_data & E1000_EEE_TX_LPI_RCVD) && - (phy_data & E1000_EEE_RX_LPI_RCVD)) || - ((status & E1000_STATUS_SPEED_100) && - (edata->advertised & ADVERTISED_100baseT_Full) && - (edata->lp_advertised & ADVERTISED_100baseT_Full) && - (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) || - ((status & E1000_STATUS_SPEED_1000) && - (edata->advertised & ADVERTISED_1000baseT_Full) && - (edata->lp_advertised & ADVERTISED_1000baseT_Full) && - (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE))) + if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) edata->eee_active = true; edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; @@ -2160,19 +2146,10 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) struct ethtool_eee eee_curr; s32 ret_val; - if (!(adapter->flags & FLAG_IS_ICH) || - !(adapter->flags2 & FLAG2_HAS_EEE)) - return -EOPNOTSUPP; - ret_val = e1000e_get_eee(netdev, &eee_curr); if (ret_val) return ret_val; - if (eee_curr.advertised != edata->advertised) { - e_err("Setting EEE advertisement is not supported\n"); - return -EINVAL; - } - if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { e_err("Setting EEE tx-lpi is not supported\n"); return -EINVAL; @@ -2183,16 +2160,21 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EINVAL; } - if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) { - hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; - - /* reset the link */ - if (netif_running(netdev)) - e1000e_reinit_locked(adapter); - else - e1000e_reset(adapter); + if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); + return -EINVAL; } + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + + hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + + /* reset the link */ + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 1e6b889aee87..84850f7a23e4 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -167,7 +167,7 @@ enum e1000_1000t_rx_status { e1000_1000t_rx_status_undefined = 0xFF }; -enum e1000_rev_polarity{ +enum e1000_rev_polarity { e1000_rev_polarity_normal = 0, e1000_rev_polarity_reversed, e1000_rev_polarity_undefined = 0xFF @@ -545,7 +545,7 @@ struct e1000_mac_info { u16 mta_reg_count; /* Maximum size of the MTA register table in all supported adapters */ - #define MAX_MTA_REG 128 +#define MAX_MTA_REG 128 u32 mta_shadow[MAX_MTA_REG]; u16 rar_entry_count; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 121a865c7fbd..ad9d8f2dd868 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -61,15 +61,15 @@ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { struct ich8_hsfsts { - u16 flcdone :1; /* bit 0 Flash Cycle Done */ - u16 flcerr :1; /* bit 1 Flash Cycle Error */ - u16 dael :1; /* bit 2 Direct Access error Log */ - u16 berasesz :2; /* bit 4:3 Sector Erase Size */ - u16 flcinprog :1; /* bit 5 flash cycle in Progress */ - u16 reserved1 :2; /* bit 13:6 Reserved */ - u16 reserved2 :6; /* bit 13:6 Reserved */ - u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ - u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 
1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ } hsf_status; u16 regval; }; @@ -78,11 +78,11 @@ union ich8_hws_flash_status { /* Offset 06h FLCTL */ union ich8_hws_flash_ctrl { struct ich8_hsflctl { - u16 flcgo :1; /* 0 Flash Cycle Go */ - u16 flcycle :2; /* 2:1 Flash Cycle */ - u16 reserved :5; /* 7:3 Reserved */ - u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ - u16 flockdn :6; /* 15:10 Reserved */ + u16 flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ } hsf_ctrl; u16 regval; }; @@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl { /* ICH Flash Region Access Permissions */ union ich8_hws_flash_regacc { struct ich8_flracc { - u32 grra :8; /* 0:7 GbE region Read Access */ - u32 grwa :8; /* 8:15 GbE region Write Access */ - u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ - u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ } hsf_flregacc; u16 regval; }; @@ -142,6 +142,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { @@ -312,7 +313,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; ew32(CTRL, mac_reg); e1e_flush(); - udelay(10); + usleep_range(10, 20); mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; ew32(CTRL, mac_reg); e1e_flush(); @@ -548,8 +549,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* find total size of the NVM, then cut in half since the total * size represents two separate NVM banks. */ - nvm->flash_bank_size = (sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); nvm->flash_bank_size /= 2; /* Adjust to word count */ nvm->flash_bank_size /= sizeof(u16); @@ -636,6 +637,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) if (mac->type == e1000_pch_lpt) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_pch_lpt; } /* Enable PCS Lock-loss workaround for ICH8 */ @@ -692,7 +695,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) * * Assumes the SW/FW/HW Semaphore is already acquired. 
**/ -static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) { return __e1000_access_emi_reg_locked(hw, addr, &data, false); } @@ -709,11 +712,22 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; - u16 lpi_ctrl; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; - if ((hw->phy.type != e1000_phy_82579) && - (hw->phy.type != e1000_phy_i217)) + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: return 0; + } ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -728,34 +742,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) /* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { - u16 lpa, pcs_status, data; - /* Save off link partner's EEE ability */ - switch (hw->phy.type) { - case e1000_phy_82579: - lpa = I82579_EEE_LP_ABILITY; - pcs_status = I82579_EEE_PCS_STATUS; - break; - case e1000_phy_i217: - lpa = I217_EEE_LP_ABILITY; - pcs_status = I217_EEE_PCS_STATUS; - break; - default: - ret_val = -E1000_ERR_PHY; - goto release; - } ret_val = e1000_read_emi_reg_locked(hw, lpa, &dev_spec->eee_lp_ability); if (ret_val) goto release; + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + /* Enable EEE only for speeds in which the link partner is - * EEE capable. + * EEE capable and for which we advertise EEE. */ - if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; - if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { e1e_rphy_locked(hw, MII_LPA, &data); if (data & LPA_100FULL) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; @@ -767,13 +771,13 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) dev_spec->eee_lp_ability &= ~I82579_EEE_100_SUPPORTED; } - - /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ - ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); - if (ret_val) - goto release; } + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); release: hw->phy.ops.release(hw); @@ -835,6 +839,94 @@ release: } /** + * e1000_platform_pm_pch_lpt - Set platform power management values + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" + * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed + * when link is up (which must not exceed the maximum latency supported + * by the platform), otherwise specify there is no LTR requirement. + * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop + * latencies in the LTR Extended Capability Structure in the PCIe Extended + * Capability register set, on this device LTR is set by writing the + * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and + * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) + * message to the PMC. 
+ **/ +static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) +{ + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 lat_enc = 0; /* latency encoded */ + + if (link) { + u16 speed, duplex, scale = 0; + u16 max_snoop, max_nosnoop; + u16 max_ltr_enc; /* max LTR latency encoded */ + s64 lat_ns; /* latency (ns) */ + s64 value; + u32 rxa; + + if (!hw->adapter->max_frame_size) { + e_dbg("max_frame_size not set.\n"); + return -E1000_ERR_CONFIG; + } + + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (!speed) { + e_dbg("Speed not set.\n"); + return -E1000_ERR_CONFIG; + } + + /* Rx Packet Buffer Allocation size (KB) */ + rxa = er32(PBA) & E1000_PBA_RXA_MASK; + + /* Determine the maximum latency tolerated by the device. + * + * Per the PCIe spec, the tolerated latencies are encoded as + * a 3-bit encoded scale (only 0-5 are valid) multiplied by + * a 10-bit value (0-1023) to provide a range from 1 ns to + * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, + * 1=2^5ns, 2=2^10ns,...5=2^25ns. + */ + lat_ns = ((s64)rxa * 1024 - + (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; + if (lat_ns < 0) + lat_ns = 0; + else + do_div(lat_ns, speed); + + value = lat_ns; + while (value > PCI_LTR_VALUE_MASK) { + scale++; + value = DIV_ROUND_UP(value, (1 << 5)); + } + if (scale > E1000_LTRV_SCALE_MAX) { + e_dbg("Invalid LTR latency scale %d\n", scale); + return -E1000_ERR_CONFIG; + } + lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); + + /* Determine the maximum latency tolerated by the platform */ + pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, + &max_snoop); + pci_read_config_word(hw->adapter->pdev, + E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); + max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); + + if (lat_enc > max_ltr_enc) + lat_enc = max_ltr_enc; + } + + /* Set Snoop and No-Snoop latencies the same */ + reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); + ew32(LTRV, reg); + + return 0; +} + +/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * @@ -871,6 +963,34 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + /* When connected at 10Mbps half-duplex, 82579 parts are excessively + * aggressive resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY. 
+ */ + if ((hw->mac.type == e1000_pch2lan) && link) { + u32 reg; + reg = er32(STATUS); + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + reg = er32(TIPG); + reg &= ~E1000_TIPG_IPGT_MASK; + reg |= 0xFF; + ew32(TIPG, reg); + + /* Reduce Rx latency in analog PHY */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); + + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + } + } + /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { @@ -879,6 +999,15 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } + if (hw->mac.type == e1000_pch_lpt) { + /* Set platform power management values for + * Latency Tolerance Reporting (LTR) + */ + ret_val = e1000_platform_pm_pch_lpt(hw, link); + if (ret_val) + return ret_val; + } + /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; @@ -1002,10 +1131,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; - /* Disable EEE by default until IEEE802.3az spec is finalized */ - if (adapter->flags2 & FLAG2_HAS_EEE) - adapter->hw.dev_spec.ich8lan.eee_disable = true; - return 0; } @@ -1134,9 +1259,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) u32 fwsm; fwsm = er32(FWSM); - return (fwsm & E1000_ICH_FWSM_FW_VALID) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); + return ((fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); } /** @@ -1153,7 +1278,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) fwsm = er32(FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && - (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** @@ -1440,8 +1565,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) word_addr = (u16)(cnf_base_addr << 1); for (i = 0; i < cnf_size; i++) { - ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, - &reg_data); + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) goto release; @@ -1501,13 +1625,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) if (ret_val) goto release; - status_reg &= BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_1000)) + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) k1_enable = false; } @@ -1516,13 +1640,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) if (ret_val) goto release; - status_reg &= HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_MASK; + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | - HV_M_STATUS_AUTONEG_COMPLETE | - HV_M_STATUS_SPEED_1000)) + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) k1_enable = false; } @@ -1579,7 +1703,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) if (ret_val) return ret_val; - udelay(20); + usleep_range(20, 40); ctrl_ext = er32(CTRL_EXT); 
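	/* Save CTRL/CTRL_EXT here so both registers can be restored below,
	 * once the temporary E1000_CTRL_EXT_SPD_BYPS speed-bypass window
	 * used while the new K1 setting takes effect has been flushed out.
	 */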
ctrl_reg = er32(CTRL); @@ -1589,11 +1713,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); e1e_flush(); - udelay(20); + usleep_range(20, 40); ew32(CTRL, ctrl_reg); ew32(CTRL_EXT, ctrl_ext); e1e_flush(); - udelay(20); + usleep_range(20, 40); return 0; } @@ -1667,7 +1791,6 @@ release: return ret_val; } - /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure @@ -1834,7 +1957,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { - u8 mac_addr[ETH_ALEN] = {0}; + u8 mac_addr[ETH_ALEN] = { 0 }; u32 addr_high, addr_low; addr_high = er32(RAH(i)); @@ -1865,8 +1988,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, @@ -1875,8 +1998,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); + E1000_KMRNCTRLSTA_HD_CTRL, + &data); if (ret_val) return ret_val; data &= ~(0xF << 8); @@ -1923,8 +2046,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_CTRL_OFFSET, - &data); + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); if (ret_val) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, @@ -1933,8 +2056,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, - E1000_KMRNCTRLSTA_HD_CTRL, - &data); + E1000_KMRNCTRLSTA_HD_CTRL, + &data); if (ret_val) return ret_val; data &= ~(0xF << 8); @@ -2100,7 +2223,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) do { data = er32(STATUS); data &= E1000_STATUS_LAN_INIT_DONE; - udelay(100); + usleep_range(100, 200); } while ((!data) && --loop); /* If basic configuration is incomplete before the above loop @@ -2445,7 +2568,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) /* Check bank 0 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, - &sig_byte); + &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == @@ -2456,8 +2579,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + - bank1_offset, - &sig_byte); + bank1_offset, + &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == @@ -2510,8 +2633,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, ret_val = 0; for (i = 0; i < words; i++) { - if (dev_spec->shadow_ram[offset+i].modified) { - data[i] = dev_spec->shadow_ram[offset+i].value; + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = dev_spec->shadow_ram[offset + i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, act_offset + i, @@ -2696,8 +2819,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = 
((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { udelay(1); @@ -2714,8 +2837,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_READ_COMMAND_TIMEOUT); + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); /* Check if FCERR is set to 1, if set to 1, clear it * and try the whole sequence a few more times, else @@ -2774,8 +2898,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, nvm->ops.acquire(hw); for (i = 0; i < words; i++) { - dev_spec->shadow_ram[offset+i].modified = true; - dev_spec->shadow_ram[offset+i].value = data[i]; + dev_spec->shadow_ram[offset + i].modified = true; + dev_spec->shadow_ram[offset + i].value = data[i]; } nvm->ops.release(hw); @@ -2844,8 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) data = dev_spec->shadow_ram[i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, i + - old_bank_offset, - &data); + old_bank_offset, + &data); if (ret_val) break; } @@ -2863,7 +2987,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; - udelay(100); + usleep_range(100, 200); /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, @@ -2871,10 +2995,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) break; - udelay(100); + usleep_range(100, 200); ret_val = e1000_retry_write_flash_byte_ich8lan(hw, - act_offset + 1, - (u8)(data >> 8)); + act_offset + 1, + (u8)(data >> 8)); if (ret_val) break; } @@ -3050,8 +3174,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; - flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + - hw->nvm.flash_base_addr; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); do { udelay(1); @@ -3062,7 +3186,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ - hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); @@ -3078,8 +3202,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_WRITE_COMMAND_TIMEOUT); + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (!ret_val) break; @@ -3138,7 +3263,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, for (program_retries = 0; program_retries < 100; program_retries++) { e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); - udelay(100); + usleep_range(100, 200); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) break; @@ -3209,8 +3334,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? 
flash_bank_size : 0; - for (j = 0; j < iteration ; j++) { + for (j = 0; j < iteration; j++) { do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) @@ -3230,8 +3357,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) flash_linear_addr += (j * sector_size); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); - ret_val = e1000_flash_cycle_ich8lan(hw, - ICH_FLASH_ERASE_COMMAND_TIMEOUT); + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (!ret_val) break; @@ -3270,8 +3396,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) return ret_val; } - if (*data == ID_LED_RESERVED_0000 || - *data == ID_LED_RESERVED_FFFF) + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return 0; @@ -3511,9 +3636,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ if (ret_val) e_dbg("Error initializing identification LED\n"); - /* This is not fatal and we should not stop init due to this */ /* Setup the receive address. */ e1000e_init_rx_addrs(hw, mac->rar_entry_count); @@ -3541,16 +3666,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) /* Set the transmit descriptor write-back policy for both queues */ txdctl = er32(TXDCTL(0)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(0), txdctl); txdctl = er32(TXDCTL(1)); - txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | - E1000_TXDCTL_FULL_TX_DESC_WB; - txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | - E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); ew32(TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. @@ -3559,7 +3684,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) if (mac->type == e1000_ich8lan) snoop = PCIE_ICH8_SNOOP_ALL; else - snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + snoop = (u32)~(PCIE_NO_SNOOP_ALL); e1000e_set_pcie_no_snoop(hw, snoop); ctrl_ext = er32(CTRL_EXT); @@ -3575,6 +3700,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) return ret_val; } + /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits * @hw: pointer to the HW structure @@ -3686,8 +3812,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) */ hw->fc.current_mode = hw->fc.requested_mode; - e_dbg("After fix-ups FlowControl is now = %x\n", - hw->fc.current_mode); + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. 
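
The TXDCTL updates just above use the standard clear-then-set idiom for a multi-bit register field: mask the whole field out first, then OR in the new value, so no stale bits of the previous setting survive. The extra parentheses added by the patch are cosmetic; the operation itself reduces to this (mask and shift are made-up example values):

    #include <stdint.h>

    #define FIELD_MASK  0x003f0000u    /* hypothetical 6-bit field */
    #define FIELD_SHIFT 16

    static uint32_t set_field(uint32_t reg, uint32_t val)
    {
        reg &= ~FIELD_MASK;                          /* clear the old field */
        reg |= (val << FIELD_SHIFT) & FIELD_MASK;    /* write the new value */
        return reg;
    }
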
*/ ret_val = hw->mac.ops.setup_physical_interface(hw); @@ -3737,12 +3862,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &reg_data); + &reg_data); if (ret_val) return ret_val; reg_data |= 0x3F; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - reg_data); + reg_data); if (ret_val) return ret_val; @@ -3760,7 +3885,6 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) break; case e1000_phy_82577: case e1000_phy_82579: - case e1000_phy_i217: ret_val = e1000_copper_link_setup_82577(hw); if (ret_val) return ret_val; @@ -3796,6 +3920,31 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) } /** + * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Calls the PHY specific link setup function and then calls the + * generic setup_copper_link to finish configuring the link for + * Lynxpoint PCH devices + **/ +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure * @speed: pointer to store current link speed @@ -3815,8 +3964,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, return ret_val; if ((hw->mac.type == e1000_ich8lan) && - (hw->phy.type == e1000_phy_igp_3) && - (*speed == SPEED_1000)) { + (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); } @@ -3899,7 +4047,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) * /disabled - false).
**/ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, - bool state) + bool state) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; @@ -3981,12 +4129,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) return; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - &reg_data); + &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, - reg_data); + reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8bf4655c2e17..80034a2b297c 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -211,7 +211,8 @@ #define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ #define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ -#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ #define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ #define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ #define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ @@ -249,13 +250,6 @@ /* Proprietary Latency Tolerance Reporting PCI Capability */ #define E1000_PCI_LTR_CAP_LPT 0xA8 -/* OBFF Control & Threshold Defines */ -#define E1000_SVCR_OFF_EN 0x00000001 -#define E1000_SVCR_OFF_MASKINT 0x00001000 -#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000 -#define E1000_SVCR_OFF_TIMER_SHIFT 16 -#define E1000_SVT_OFF_HWM_MASK 0x0000001F - void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); @@ -267,4 +261,5 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); #endif /* _E1000E_ICH8LAN_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b78e02174601..2480c1091873 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * serdes media type. */ /* SYNCH bit and IV bit are sticky. */ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { @@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) status = er32(STATUS); if (status & E1000_STATUS_LU) { /* SYNCH bit and IV bit are sticky, so reread rxcw.
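
The recurring udelay() to usleep_range() conversions in these files replace a busy-wait with a real sleep, which is only legal in process context; giving the timer code a min/max window also lets it coalesce wakeups instead of firing at an exact microsecond. A sketch of the resulting polling shape, assuming process context and e1000e's er32() register accessor (the retry budget here is illustrative):

    /* Poll a status bit, sleeping 100-200 us between reads. May sleep,
     * so this must never run in interrupt or atomic context.
     */
    static int poll_lan_init_done(struct e1000_hw *hw)
    {
        int tries = 100;

        while (tries--) {
            if (er32(STATUS) & E1000_STATUS_LAN_INIT_DONE)
                return 0;
            usleep_range(100, 200);    /* was udelay(100): a busy-wait */
        }
        return -E1000_ERR_RESET;
    }
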
*/ - udelay(10); + usleep_range(10, 20); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { @@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (!(swsm & E1000_SWSM_SMBI)) break; - udelay(50); + usleep_range(50, 100); i++; } @@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) if (er32(SWSM) & E1000_SWSM_SWESMBI) break; - udelay(50); + usleep_range(50, 100); } if (i == timeout) { @@ -1600,15 +1600,28 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw) ledctl_blink = E1000_LEDCTL_LED0_BLINK | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); } else { - /* set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. */ ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } } ew32(LEDCTL, ledctl_blink); @@ -1712,7 +1725,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) while (timeout) { if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; - udelay(100); + usleep_range(100, 200); timeout--; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7e615e2bf7e6..b18fad5b579e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -55,7 +55,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION +#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); - pr_info("%-15s %016lX %016lX %016lX\n", - netdev->name, netdev->state, netdev->trans_start, - netdev->last_rx); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); } /* Print Registers */ @@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, cpu_to_le64(ps_page->dma); } - skb = __netdev_alloc_skb_ip_align(netdev, - adapter->rx_ps_bsize0, + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, gfp); if (!skb) { @@ -850,8 +848,8 @@ check_page: if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - PAGE_SIZE, + buffer_info->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { adapter->alloc_rx_buff_failed++; @@ -942,10 +940,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, cleaned = true; cleaned_count++; - dma_unmap_single(&pdev->dev, - buffer_info->dma, - adapter->rx_buffer_len, - 
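
The reworked e1000e_blink_led_generic() above walks the four 8-bit LED fields packed into LEDCTL and arms the blink bit only where the LED is effectively "on": mode LED_ON with normal polarity, or mode LED_OFF when the default configuration has the invert (IVRT) bit set, because the hardware blink logic only operates on the "on" mode. The field walk is plain mask arithmetic; a compilable model (the constants are believed to match the e1000 LEDCTL layout, but treat them as illustrative):

    #include <stdint.h>

    #define LED0_MODE_MASK 0x0000000Fu
    #define LED0_IVRT      0x00000040u
    #define LED0_BLINK     0x00000080u
    #define MODE_LED_ON    0xEu
    #define MODE_LED_OFF   0xFu

    static uint32_t blink_mask(uint32_t mode2, uint32_t led_default)
    {
        uint32_t ledctl = mode2;
        int i;

        for (i = 0; i < 32; i += 8) {    /* one LED field per byte */
            uint32_t mode = (mode2 >> i) & LED0_MODE_MASK;
            int inverted = (led_default >> i) & LED0_IVRT;

            if ((!inverted && mode == MODE_LED_ON) ||
                (inverted && mode == MODE_LED_OFF)) {
                ledctl &= ~(LED0_MODE_MASK << i);
                ledctl |= (LED0_BLINK | MODE_LED_ON) << i;
            }
        }
        return ledctl;
    }
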
DMA_FROM_DEVICE); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->wb.upper.length); @@ -1073,8 +1069,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, static void e1000_print_hw_hang(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, - print_hang_task); + struct e1000_adapter, + print_hang_task); struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; @@ -1087,8 +1083,7 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; - if (!adapter->tx_hang_recheck && - (adapter->flags2 & FLAG2_DMA_BURST)) { + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { /* May be block on write-back, flush and detect again * flush pending descriptor writebacks to memory */ @@ -1130,19 +1125,10 @@ static void e1000_print_hw_hang(struct work_struct *work) "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n", - readl(tx_ring->head), - readl(tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status, - er32(STATUS), - phy_status, - phy_1000t_status, - phy_ext_status, - pci_status); + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); /* Suggest workaround for known h/w issue */ if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) @@ -1435,7 +1421,7 @@ copydone: e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); if (rx_desc->wb.upper.header_status & - cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) adapter->rx_hdr_split++; e1000_receive_skb(adapter, netdev, skb, staterr, @@ -1473,7 +1459,7 @@ next_desc: * e1000_consume_page - helper function **/ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, - u16 length) + u16 length) { bi->page = NULL; skb->len += length; @@ -1500,7 +1486,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, unsigned int i; int cleaned_count = 0; bool cleaned = false; - unsigned int total_rx_bytes=0, total_rx_packets=0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; i = rx_ring->next_to_clean; rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); @@ -1546,7 +1533,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, rx_ring->rx_skb_top = NULL; goto next_desc; } - #define rxtop (rx_ring->rx_skb_top) if (!(staterr & E1000_RXD_STAT_EOP)) { /* this descriptor is only the beginning (or middle) */ @@ -1554,12 +1540,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, /* this is the beginning of a chain */ rxtop = skb; skb_fill_page_desc(rxtop, 0, buffer_info->page, - 0, length); + 0, length); } else { /* this is the middle of a chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the skb, only consumed the page */ buffer_info->skb = skb; } @@ -1568,9 +1555,10 @@ static bool 
e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, } else { if (rxtop) { /* end of the chain */ - skb_fill_page_desc(rxtop, - skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); /* re-use the current skb, we only consumed the * page */ @@ -1595,10 +1583,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, length); } else { skb_fill_page_desc(skb, 0, - buffer_info->page, 0, - length); + buffer_info->page, 0, + length); e1000_consume_page(buffer_info, skb, - length); + length); } } } @@ -1671,8 +1659,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) dma_unmap_page(&pdev->dev, buffer_info->dma, - PAGE_SIZE, - DMA_FROM_DEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); else if (adapter->clean_rx == e1000_clean_rx_irq_ps) dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_bsize0, @@ -1725,7 +1712,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) static void e1000e_downshift_workaround(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, downshift_task); + struct e1000_adapter, + downshift_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -1918,7 +1906,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; - adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -1975,7 +1962,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) ew32(RFCTL, rfctl); } -#define E1000_IVAR_INT_ALLOC_VALID 0x8 /* Configure Rx vector */ rx_ring->ims_val = E1000_IMS_RXQ0; adapter->eiac_mask |= rx_ring->ims_val; @@ -2050,8 +2036,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) if (adapter->flags & FLAG_HAS_MSIX) { adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ adapter->msix_entries = kcalloc(adapter->num_vectors, - sizeof(struct msix_entry), - GFP_KERNEL); + sizeof(struct + msix_entry), + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < adapter->num_vectors; i++) adapter->msix_entries[i].entry = i; @@ -2495,7 +2482,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) switch (itr_setting) { case lowest_latency: /* handle TSO and jumbo frames */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; else if ((packets < 5) && (bytes > 512)) retval = low_latency; @@ -2503,13 +2490,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ - if (bytes/packets > 8000) + if (bytes / packets > 8000) retval = bulk_latency; - else if ((packets < 10) || ((bytes/packets) > 1200)) + else if ((packets < 10) || ((bytes / packets) > 1200)) retval = bulk_latency; else if ((packets > 35)) retval = lowest_latency; - } else if (bytes/packets > 2000) { + } else if (bytes / packets > 2000) { retval = bulk_latency; } else if (packets <= 2 && bytes < 512) { retval = lowest_latency; @@ -2561,8 +2548,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter) current_itr = max(adapter->rx_itr, adapter->tx_itr); - switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { case 
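
In the jumbo receive path above, a frame spanning several descriptors is accumulated in rx_ring->rx_skb_top: the first buffer's page becomes frag 0, each middle buffer appends its page at skb_shinfo(rxtop)->nr_frags, and only the EOP descriptor completes and delivers the skb; the new shinfo local just shortens those call sites. A condensed sketch of the chaining step (length accounting and error paths omitted):

    #include <linux/skbuff.h>

    /* Append one received page to the frame being assembled in *rxtop. */
    static void chain_rx_page(struct sk_buff **rxtop, struct sk_buff *skb,
                              struct page *page, unsigned int len)
    {
        if (!*rxtop) {
            *rxtop = skb;    /* beginning of a multi-descriptor chain */
            skb_fill_page_desc(skb, 0, page, 0, len);
        } else {
            struct skb_shared_info *shinfo = skb_shinfo(*rxtop);

            skb_fill_page_desc(*rxtop, shinfo->nr_frags, page, 0, len);
        }
    }
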
lowest_latency: new_itr = 70000; break; @@ -2583,8 +2570,7 @@ set_itr_now: * increasing */ new_itr = new_itr > adapter->itr ? - min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; adapter->itr = new_itr; adapter->rx_ring->itr_val = new_itr; if (adapter->msix_entries) @@ -2815,8 +2801,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter) u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { e1000_vlan_rx_add_vid(netdev, vid); adapter->mng_vlan_id = vid; } @@ -2832,7 +2817,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter) e1000_vlan_rx_add_vid(adapter->netdev, 0); for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) - e1000_vlan_rx_add_vid(adapter->netdev, vid); + e1000_vlan_rx_add_vid(adapter->netdev, vid); } static void e1000_init_manageability_pt(struct e1000_adapter *adapter) @@ -3007,8 +2992,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) rctl = er32(RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | - E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | - (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not Store bad packets */ rctl &= ~E1000_RCTL_SBP; @@ -3094,19 +3079,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* Enable Packet split descriptors */ rctl |= E1000_RCTL_DTYP_PS; - psrctl |= adapter->rx_ps_bsize0 >> - E1000_PSRCTL_BSIZE0_SHIFT; + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; switch (adapter->rx_ps_pages) { case 3: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ case 2: - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ case 1: - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; break; } @@ -3280,7 +3263,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev) /* update_mc_addr_list expects a packed array of only addresses. 
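
e1000_update_itr(), reformatted above, classifies recent traffic into latency buckets from only two counters per polling interval: a high bytes/packets ratio (large or TSO frames) pushes toward bulk_latency, a small trickle of short frames toward lowest_latency. The spacing-only bytes / packets changes leave the heuristic intact; here it is as a compilable model (the bulk_latency demotion case is not visible in this hunk and is omitted):

    enum itr_bucket { lowest_latency, low_latency, bulk_latency };

    static enum itr_bucket update_itr(enum itr_bucket cur, int packets, int bytes)
    {
        enum itr_bucket ret = cur;

        if (!packets)            /* nothing seen: keep the current bucket */
            return ret;

        switch (cur) {
        case lowest_latency:
            if (bytes / packets > 8000)          /* TSO/jumbo traffic */
                ret = bulk_latency;
            else if (packets < 5 && bytes > 512)
                ret = low_latency;
            break;
        case low_latency:
            if (bytes > 10000) {
                if (bytes / packets > 8000)
                    ret = bulk_latency;
                else if (packets < 10 || bytes / packets > 1200)
                    ret = bulk_latency;
                else if (packets > 35)
                    ret = lowest_latency;
            } else if (bytes / packets > 2000) {
                ret = bulk_latency;
            } else if (packets <= 2 && bytes < 512) {
                ret = lowest_latency;
            }
            break;
        default:
            break;
        }
        return ret;
    }
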
*/ i = 0; netdev_for_each_mc_addr(ha, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i); kfree(mta_list); @@ -3757,8 +3740,7 @@ void e1000e_reset(struct e1000_adapter *adapter) * but don't include ethernet FCS because hardware appends it */ min_tx_space = (adapter->max_frame_size + - sizeof(struct e1000_tx_desc) - - ETH_FCS_LEN) * 2; + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; min_tx_space = ALIGN(min_tx_space, 1024); min_tx_space >>= 10; /* software strips receive CRC, so leave room for it */ @@ -3861,13 +3843,13 @@ void e1000e_reset(struct e1000_adapter *adapter) if ((adapter->max_frame_size * 2) > (pba << 10)) { if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned off\n"); + "Interrupt Throttle Rate off\n"); adapter->flags2 |= FLAG2_DISABLE_AIM; e1000e_write_itr(adapter, 0); } } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { dev_info(&adapter->pdev->dev, - "Interrupt Throttle Rate turned on\n"); + "Interrupt Throttle Rate on\n"); adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->itr = 20000; e1000e_write_itr(adapter, adapter->itr); @@ -3898,6 +3880,38 @@ void e1000e_reset(struct e1000_adapter *adapter) /* initialize systim and reset the ns time counter */ e1000e_config_hwtstamp(adapter); + /* Set EEE advertisement as appropriate */ + if (adapter->flags2 & FLAG2_HAS_EEE) { + s32 ret_val; + u16 adv_addr; + + switch (hw->phy.type) { + case e1000_phy_82579: + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + dev_err(&adapter->pdev->dev, + "Invalid PHY type setting EEE advertisement\n"); + return; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + dev_err(&adapter->pdev->dev, + "EEE advertisement - unable to acquire PHY\n"); + return; + } + + e1000_write_emi_reg_locked(hw, adv_addr, + hw->dev_spec.ich8lan.eee_disable ? 
+ 0 : adapter->eee_advert); + + hw->phy.ops.release(hw); + } + if (!netif_running(adapter->netdev) && !test_bit(__E1000_TESTING, &adapter->state)) { e1000_power_down_phy(adapter); @@ -4266,8 +4280,7 @@ static int e1000_open(struct net_device *netdev) e1000e_power_up_phy(adapter); adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; - if ((adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ @@ -4370,8 +4383,7 @@ static int e1000_close(struct net_device *netdev) /* kill manageability vlan ID if supported, but not if a vlan with * the same ID is registered on the host OS (let 8021q kill it) */ - if (adapter->hw.mng_cookie.status & - E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); /* If AMT is enabled, let the firmware know that the network @@ -4387,6 +4399,7 @@ static int e1000_close(struct net_device *netdev) return 0; } + /** * e1000_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -4437,7 +4450,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p) static void e1000e_update_phy_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, update_phy_task); + struct e1000_adapter, + update_phy_task); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4454,7 +4468,7 @@ static void e1000e_update_phy_task(struct work_struct *work) **/ static void e1000_update_phy_info(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4621,18 +4635,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) * our own version based on RUC and ROC */ netdev->stats.rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; netdev->stats.rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.roc; netdev->stats.rx_crc_errors = adapter->stats.crcerrs; netdev->stats.rx_frame_errors = adapter->stats.algnerrc; netdev->stats.rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - netdev->stats.tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; netdev->stats.tx_aborted_errors = adapter->stats.ecol; netdev->stats.tx_window_errors = adapter->stats.latecol; netdev->stats.tx_carrier_errors = adapter->stats.tncrs; @@ -4790,7 +4802,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) **/ static void e1000_watchdog(unsigned long data) { - struct e1000_adapter *adapter = (struct e1000_adapter *) data; + struct e1000_adapter *adapter = (struct e1000_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -4801,7 +4813,8 @@ static void e1000_watchdog(unsigned long data) static void e1000_watchdog_task(struct work_struct *work) { struct e1000_adapter *adapter = container_of(work, - struct e1000_adapter, watchdog_task); + struct e1000_adapter, + watchdog_task); struct net_device 
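
The new reset-path block above programs the EEE advertisement through the EMI interface while holding the PHY semaphore: acquire, write I82579_EEE_ADVERTISEMENT or I217_EEE_ADVERTISEMENT (zero when EEE is disabled), release. The locking discipline is the point; as a sketch (error reporting trimmed, using the e1000_write_emi_reg_locked() prototype this patch adds to ich8lan.h):

    /* Write an EMI register; the _locked suffix means the caller owns
     * the PHY semaphore, so take and release it around the call.
     */
    static void set_eee_advert(struct e1000_hw *hw, u16 adv_addr, u16 adv)
    {
        if (hw->phy.ops.acquire(hw))
            return;    /* could not get the PHY semaphore */

        e1000_write_emi_reg_locked(hw, adv_addr, adv);

        hw->phy.ops.release(hw);
    }
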
*netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct e1000_phy_info *phy = &adapter->hw.phy; @@ -4835,8 +4848,8 @@ static void e1000_watchdog_task(struct work_struct *work) /* update snapshot of PHY registers on LSC */ e1000_phy_read_status(adapter); mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); e1000_print_link_info(adapter); /* check if SmartSpeed worked */ @@ -4949,7 +4962,7 @@ static void e1000_watchdog_task(struct work_struct *work) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, - LINK_TIMEOUT); + LINK_TIMEOUT); } } @@ -4984,8 +4997,8 @@ link_up: */ u32 goc = (adapter->gotc + adapter->gorc) / 10000; u32 dif = (adapter->gotc > adapter->gorc ? - adapter->gotc - adapter->gorc : - adapter->gorc - adapter->gotc) / 10000; + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; e1000e_write_itr(adapter, itr); @@ -5064,14 +5077,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); + 0, IPPROTO_TCP, 0); cmd_length = E1000_TXD_CMD_IP; ipcse = skb_transport_offset(skb) - 1; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); ipcse = 0; } ipcss = skb_network_offset(skb); @@ -5080,7 +5093,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); @@ -5150,8 +5163,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc->lower_setup.ip_config = 0; context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = - css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(cmd_len); @@ -5224,7 +5236,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, buffer_info->time_stamp = jiffies; buffer_info->next_to_watch = i; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, - offset, size, DMA_TO_DEVICE); + offset, size, + DMA_TO_DEVICE); buffer_info->mapped_as_page = true; if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; @@ -5273,7 +5286,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) if (tx_flags & E1000_TX_FLAGS_TSO) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | - E1000_TXD_CMD_TSE; + E1000_TXD_CMD_TSE; txd_upper |= E1000_TXD_POPTS_TXSM << 8; if (tx_flags & E1000_TX_FLAGS_IPV4) @@ -5304,8 +5317,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) buffer_info = &tx_ring->buffer_info[i]; tx_desc = E1000_TX_DESC(*tx_ring, i); tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - tx_desc->lower.data = - cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); 
tx_desc->upper.data = cpu_to_le32(txd_upper); i++; @@ -5355,11 +5368,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) return 0; - if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) return 0; { - const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); struct udphdr *udp; if (ip->protocol != IPPROTO_UDP) @@ -5584,7 +5597,7 @@ static void e1000_reset_task(struct work_struct *work) * Returns the address of the device statistics structure. **/ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -5605,18 +5618,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, * our own version based on RUC and ROC */ stats->rx_errors = adapter->stats.rxerrc + - adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.ruc + adapter->stats.roc + - adapter->stats.cexterr; - stats->rx_length_errors = adapter->stats.ruc + - adapter->stats.roc; + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; stats->rx_crc_errors = adapter->stats.crcerrs; stats->rx_frame_errors = adapter->stats.algnerrc; stats->rx_missed_errors = adapter->stats.mpc; /* Tx Errors */ - stats->tx_errors = adapter->stats.ecol + - adapter->stats.latecol; + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; stats->tx_aborted_errors = adapter->stats.ecol; stats->tx_window_errors = adapter->stats.latecol; stats->tx_carrier_errors = adapter->stats.tncrs; @@ -5685,9 +5695,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN - + ETH_FCS_LEN; + + ETH_FCS_LEN; if (netif_running(netdev)) e1000e_up(adapter); @@ -5866,7 +5876,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) phy_reg &= ~(BM_RCTL_MO_MASK); if (mac_reg & E1000_RCTL_MO_3) phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) - << BM_RCTL_MO_SHIFT); + << BM_RCTL_MO_SHIFT); if (mac_reg & E1000_RCTL_BAM) phy_reg |= BM_RCTL_BAM; if (mac_reg & E1000_RCTL_PMCF) @@ -5935,10 +5945,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } ctrl = er32(CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; @@ -5982,8 +5988,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) */ e1000e_release_hw_control(adapter); - pci_clear_master(pdev); - /* The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. 
To * prevent this we need to mask off the correctable errors on the @@ -6082,24 +6086,24 @@ static int __e1000_resume(struct pci_dev *pdev) e1e_rphy(&adapter->hw, BM_WUS, &phy_data); if (phy_data) { e_info("PHY Wakeup cause - %s\n", - phy_data & E1000_WUS_EX ? "Unicast Packet" : - phy_data & E1000_WUS_MC ? "Multicast Packet" : - phy_data & E1000_WUS_BC ? "Broadcast Packet" : - phy_data & E1000_WUS_MAG ? "Magic Packet" : - phy_data & E1000_WUS_LNKC ? - "Link Status Change" : "other"); + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); } e1e_wphy(&adapter->hw, BM_WUS, ~0); } else { u32 wus = er32(WUS); if (wus) { e_info("MAC Wakeup cause - %s\n", - wus & E1000_WUS_EX ? "Unicast Packet" : - wus & E1000_WUS_MC ? "Multicast Packet" : - wus & E1000_WUS_BC ? "Broadcast Packet" : - wus & E1000_WUS_MAG ? "Magic Packet" : - wus & E1000_WUS_LNKC ? "Link Status Change" : - "other"); + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); } ew32(WUS, ~0); } @@ -6374,7 +6378,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) e_info("(PCI Express:2.5GT/s:%s) %pM\n", /* bus width */ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - "Width x1"), + "Width x1"), /* MAC address */ netdev->dev_addr); e_info("Intel(R) PRO/%s Network Connection\n", @@ -6484,7 +6488,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) resource_size_t flash_start, flash_len; static int cards_found; u16 aspm_disable_flag = 0; - int i, err, pci_using_dac; + int bars, i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; @@ -6511,15 +6515,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } } - err = pci_request_selected_regions_exclusive(pdev, - pci_select_bars(pdev, IORESOURCE_MEM), - e1000e_driver_name); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); if (err) goto err_pci_reg; @@ -6572,6 +6577,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_flashmap; } + /* Set default EEE advertisement */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + /* construct the net_device struct */ netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); @@ -6688,11 +6697,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long) adapter; + adapter->watchdog_timer.data = (unsigned long)adapter; init_timer(&adapter->phy_info_timer); adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long) adapter; + adapter->phy_info_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, 
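
The resume-path reporting above decodes the Wakeup Status bits into a human-readable cause with one chained conditional, so only the highest-priority matching bit is printed. Pulled out into a compilable helper (the bit values are believed to match the e1000 WUS register, but verify before reuse):

    #include <stdint.h>

    #define WUS_LNKC 0x01u    /* link status change */
    #define WUS_MAG  0x02u    /* magic packet */
    #define WUS_EX   0x04u    /* directed (unicast) */
    #define WUS_MC   0x10u    /* multicast */
    #define WUS_BC   0x20u    /* broadcast */

    static const char *wake_cause(uint32_t wus)
    {
        return wus & WUS_EX   ? "Unicast Packet" :
               wus & WUS_MC   ? "Multicast Packet" :
               wus & WUS_BC   ? "Broadcast Packet" :
               wus & WUS_MAG  ? "Magic Packet" :
               wus & WUS_LNKC ? "Link Status Change" : "other";
    }
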
e1000_watchdog_task); @@ -6800,7 +6809,7 @@ err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -6870,7 +6879,7 @@ static void e1000_remove(struct pci_dev *pdev) if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); @@ -6891,7 +6900,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, @@ -6967,8 +6977,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); #ifdef CONFIG_PM static const struct dev_pm_ops e1000_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) - SET_RUNTIME_PM_OPS(e1000_runtime_suspend, - e1000_runtime_resume, e1000_idle) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, + e1000_idle) }; #endif diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 84fecc268162..44ddc0a0ee0e 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw) { u32 ctrl_ext; - udelay(10); + usleep_range(10, 20); ctrl_ext = er32(CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_EE_RST; ew32(CTRL_EXT, ctrl_ext); diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 98da75dff936..c16bd75b6caa 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -45,7 +45,7 @@ unsigned int copybreak = COPYBREAK_DEFAULT; module_param(copybreak, uint, 0644); MODULE_PARM_DESC(copybreak, - "Maximum size of packet that is copied to a new buffer on receive"); + "Maximum size of packet that is copied to a new buffer on receive"); /* All parameters are treated the same, as an integer array of values. 
* This macro just reduces the need to repeat the same declaration code @@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); * * Default Value: 1 (enabled) */ -E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); +E1000_PARAM(WriteProtectNVM, + "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); /* Enable CRC Stripping * @@ -160,13 +161,18 @@ struct e1000_option { const char *err; int def; union { - struct { /* range_option info */ + /* range_option info */ + struct { int min; int max; } r; - struct { /* list_option info */ + /* list_option info */ + struct { int nr; - struct e1000_opt_list { int i; char *str; } *p; + struct e1000_opt_list { + int i; + char *str; + } *p; } l; } arg; }; @@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) "Using defaults for all values\n"); } - { /* Transmit Interrupt Delay */ + /* Transmit Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Interrupt Delay", @@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->tx_int_delay = opt.def; } } - { /* Transmit Absolute Interrupt Delay */ + /* Transmit Absolute Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Transmit Absolute Interrupt Delay", @@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->tx_abs_int_delay = opt.def; } } - { /* Receive Interrupt Delay */ + /* Receive Interrupt Delay */ + { static struct e1000_option opt = { .type = range_option, .name = "Receive Interrupt Delay", @@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->rx_int_delay = opt.def; } } - { /* Receive Absolute Interrupt Delay */ + /* Receive Absolute Interrupt Delay */ + { static const struct e1000_option opt = { .type = range_option, .name = "Receive Absolute Interrupt Delay", @@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->rx_abs_int_delay = opt.def; } } - { /* Interrupt Throttling Rate */ + /* Interrupt Throttling Rate */ + { static const struct e1000_option opt = { .type = range_option, .name = "Interrupt Throttling Rate (ints/sec)", @@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) break; } } - { /* Interrupt Mode */ + /* Interrupt Mode */ + { static struct e1000_option opt = { .type = range_option, .name = "Interrupt Mode", @@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) kfree(opt.err); #endif } - { /* Smart Power Down */ + /* Smart Power Down */ + { static const struct e1000_option opt = { .type = enable_option, .name = "PHY Smart Power Down", @@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->flags |= FLAG_SMART_POWER_DOWN; } } - { /* CRC Stripping */ + /* CRC Stripping */ + { static const struct e1000_option opt = { .type = enable_option, .name = "CRC Stripping", @@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter) adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; } } - { /* Kumeran Lock Loss Workaround */ + /* Kumeran Lock Loss Workaround */ + { static const struct e1000_option opt = { .type = enable_option, .name = "Kumeran Lock Loss Workaround", .err = "defaulting to Enabled", .def = OPTION_ENABLED }; + bool enabled = opt.def; if (num_KumeranLockLoss > bd) { unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; e1000_validate_option(&kmrn_lock_loss, &opt, 
adapter); - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - kmrn_lock_loss); - } else { - if (hw->mac.type == e1000_ich8lan) - e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, - opt.def); + enabled = kmrn_lock_loss; } + + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + enabled); } - { /* Write-protect NVM */ + /* Write-protect NVM */ + { static const struct e1000_option opt = { .type = enable_option, .name = "Write-protect NVM", @@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter) if (adapter->flags & FLAG_IS_ICH) { if (num_WriteProtectNVM > bd) { - unsigned int write_protect_nvm = WriteProtectNVM[bd]; + unsigned int write_protect_nvm = + WriteProtectNVM[bd]; e1000_validate_option(&write_protect_nvm, &opt, adapter); if (write_protect_nvm) diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 0930c136aa31..59c76a6815a0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { - 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED +}; + #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_m88_cable_length_table) @@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = { 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, - 124}; + 124 +}; + #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) @@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) manc = er32(MANC); - return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? - E1000_BLK_PHY_RESET : 0; + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; } /** @@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) return ret_val; phy->id = (u32)(phy_id << 16); - udelay(20); + usleep_range(20, 40); ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); if (ret_val) return ret_val; @@ -175,7 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } - *data = (u16) mdic; + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16)mdic; /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. @@ -233,6 +242,12 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. @@ -324,7 +339,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) * semaphores before exiting. 
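
The new checks in e1000e_read_phy_reg_mdic() and e1000e_write_phy_reg_mdic() above exploit the fact that the completed MDIC word echoes the register offset of the transaction that actually ran; comparing that echo with the requested offset catches a transaction that landed on the wrong PHY register. The comparison itself is a mask-and-shift, modeled here (mask and shift follow the e1000 MDIC field layout):

    #include <stdint.h>

    #define MDIC_REG_MASK  0x001F0000u
    #define MDIC_REG_SHIFT 16

    /* Return 1 if the completed MDIC transaction matches the request. */
    static int mdic_offset_ok(uint32_t mdic, uint32_t requested_offset)
    {
        return ((mdic & MDIC_REG_MASK) >> MDIC_REG_SHIFT) == requested_offset;
    }
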
**/ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { s32 ret_val = 0; @@ -391,7 +406,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { s32 ret_val = 0; @@ -410,8 +425,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, (u16)offset); if (!ret_val) ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & - offset, - data); + offset, data); if (!locked) hw->phy.ops.release(hw); @@ -458,7 +472,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, - bool locked) + bool locked) { u32 kmrnctrlsta; @@ -531,7 +545,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) * before exiting. **/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, - bool locked) + bool locked) { u32 kmrnctrlsta; @@ -772,8 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) phy_data |= M88E1000_EPSCR_TX_CLK_25; - if ((phy->revision == 2) && - (phy->id == M88E1111_I_PHY_ID)) { + if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { /* 82573L PHY - set the downshift counter to 5x. */ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; @@ -1296,7 +1309,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; @@ -1319,7 +1332,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, - 100000, &link); + 100000, &link); if (ret_val) return ret_val; } @@ -1609,9 +1622,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw) ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) - phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1653,9 +1666,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &data); if (!ret_val) - phy->cable_polarity = (data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1685,9 +1698,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) - phy->cable_polarity = (phy_data & mask) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -1733,7 +1746,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) * Polls the PHY status register for link, 'iterations' number of times. 
**/ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, - u32 usec_interval, bool *success) + u32 usec_interval, bool *success) { s32 ret_val = 0; u16 i, phy_status; @@ -1756,7 +1769,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, if (phy_status & BMSR_LSTATUS) break; if (usec_interval >= 1000) - mdelay(usec_interval/1000); + mdelay(usec_interval / 1000); else udelay(usec_interval); } @@ -1791,8 +1804,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) if (ret_val) return ret_val; - index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT; + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; @@ -1824,10 +1837,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { - IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D }; /* Read the AGC registers for all channels */ @@ -1841,8 +1854,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) * that can be put into the lookup table to obtain the * approximate cable length. */ - cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & - IGP02E1000_AGC_LENGTH_MASK; + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || @@ -1865,8 +1878,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ - phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? - (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; @@ -2040,9 +2053,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) return ret_val; } else { /* Polarity is forced */ - phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? 
e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); } ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); @@ -2119,7 +2132,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) ew32(CTRL, ctrl); e1e_flush(); - udelay(150); + usleep_range(150, 300); phy->ops.release(hw); @@ -2375,13 +2388,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); + (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); release: hw->phy.ops.release(hw); @@ -2433,13 +2446,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, - (page << page_shift)); + (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); + data); release: hw->phy.ops.release(hw); return ret_val; @@ -2674,7 +2687,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, if (read) { /* Read the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, - data); + data); } else { /* Write the Wakeup register page value using opcode 0x12 */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, @@ -2763,7 +2776,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - data, true); + data, true); goto out; } @@ -2786,8 +2799,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, page << IGP_PAGE_SHIFT, reg); - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); out: if (!locked) hw->phy.ops.release(hw); @@ -2871,7 +2883,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if (page > 0 && page < HV_INTC_FC_PAGE_START) { ret_val = e1000_access_phy_debug_regs_hv(hw, offset, - &data, false); + &data, false); goto out; } @@ -2910,7 +2922,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, page << IGP_PAGE_SHIFT, reg); ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, - data); + data); out: if (!locked) @@ -2988,15 +3000,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page) * These accesses done with PHY address 2 and without using pages. **/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, - u16 *data, bool read) + u16 *data, bool read) { s32 ret_val; u32 addr_reg; u32 data_reg; /* This takes care of the difference with desktop vs mobile phy */ - addr_reg = (hw->phy.type == e1000_phy_82578) ? - I82578_ADDR_REG : I82577_ADDR_REG; + addr_reg = ((hw->phy.type == e1000_phy_82578) ? 
+ I82578_ADDR_REG : I82577_ADDR_REG); data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ @@ -3050,8 +3062,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) if (ret_val) return ret_val; - data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) @@ -3086,9 +3098,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) - phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) - ? e1000_rev_polarity_reversed - : e1000_rev_polarity_normal; + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); return ret_val; } @@ -3215,8 +3227,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) if (ret_val) return ret_val; - length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> - I82577_DSTATUS_CABLE_LENGTH_SHIFT; + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); if (length == E1000_CABLE_LENGTH_UNDEFINED) return -E1000_ERR_PHY; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index d60cd4393415..bea46bb26061 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->desc) goto err; @@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->desc) goto err; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index b5f94abe3cff..5dc119fd95a8 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -717,14 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) txdr->size = ALIGN(txdr->size, 4096); txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!txdr->desc) { vfree(txdr->buffer_info); - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate transmit descriptor memory\n"); return -ENOMEM; } - memset(txdr->desc, 0, txdr->size); txdr->next_to_use = 0; txdr->next_to_clean = 0; @@ -807,8 +804,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) if (!rxdr->desc) { vfree(rxdr->buffer_info); - netif_err(adapter, probe, adapter->netdev, - "Unable to allocate receive descriptors\n"); return -ENOMEM; } memset(rxdr->desc, 0, rxdr->size); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..e56a3d169e30 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7007,7 +7007,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], int err; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return -EOPNOTSUPP; + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); /* Hardware does not support aging addresses so if a * ndm_state is given only allow permanent addresses @@ -7038,44 +7038,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return 
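
The ixgb conversion above folds the follow-up memset() into the allocation by passing __GFP_ZERO to dma_alloc_coherent(), and drops the driver's own allocation-failure message, matching the series-wide cleanup of such messages. The resulting pattern, sketched:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Allocate a zeroed descriptor ring in coherent DMA memory;
     * __GFP_ZERO replaces the old explicit memset(desc, 0, size).
     */
    static void *alloc_ring(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
    {
        return dma_alloc_coherent(&pdev->dev, size, dma,
                                  GFP_KERNEL | __GFP_ZERO);
    }
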
err; } -static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err = -EOPNOTSUPP; - - if (ndm->ndm_state & NUD_PERMANENT) { - pr_info("%s: FDB only supports static addresses\n", - ixgbe_driver_name); - return -EINVAL; - } - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - } - - return err; -} - -static int ixgbe_ndo_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - int idx) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); - - return idx; -} - static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) { @@ -7171,8 +7133,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_features = ixgbe_set_features, .ndo_fix_features = ixgbe_fix_features, .ndo_fdb_add = ixgbe_ndo_fdb_add, - .ndo_fdb_del = ixgbe_ndo_fdb_del, - .ndo_fdb_dump = ixgbe_ndo_fdb_dump, .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268c..b3e6530637e3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -661,13 +661,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) bool enable = ((event_mask & 0x10000000U) != 0); if (enable) { - eth_random_addr(vf_mac_addr); - e_info(probe, "IOV: VF %d is enabled MAC %pM\n", - vfn, vf_mac_addr); - /* - * Store away the VF "permananet" MAC address, it will ask - * for it later. 
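
A note on the FDB removals above: rtnetlink, as of the generic-FDB series this change belongs to, falls back to ndo_dflt_fdb_del()/ndo_dflt_fdb_dump() whenever a driver leaves those hooks unset, so only fdb_add keeps an explicit delegation for the non-SR-IOV case. A minimal sketch of the pattern; the foo_* names and the priv layout are hypothetical:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct foo_adapter {
	bool sriov_enabled;		/* hypothetical flag */
};

static int foo_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	struct foo_adapter *adapter = netdev_priv(dev);

	/* Without SR-IOV, behave exactly as if the driver had no
	 * .ndo_fdb_add hook at all and let the core do the work. */
	if (!adapter->sriov_enabled)
		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);

	/* SR-IOV-specific unicast/multicast policy would go here. */
	return -EOPNOTSUPP;
}
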
- */ + eth_zero_addr(vf_mac_addr); memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); } @@ -688,7 +682,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ixgbe_vf_reset_event(adapter, vf); /* set vf mac address */ - ixgbe_set_vf_mac(adapter, vf, vf_mac); + if (!is_zero_ether_addr(vf_mac)) + ixgbe_set_vf_mac(adapter, vf, vf_mac); vf_shift = vf % 32; reg_offset = vf / 32; @@ -729,8 +724,16 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, ETH_ALEN); + msgbuf[0] = IXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + dev_warn(&adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } /* * Piggyback the multicast filter type so VF can compute the diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index fc0af9a3bb35..fff0d9867529 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer { struct sk_buff *skb; dma_addr_t dma; unsigned long time_stamp; + union ixgbe_adv_tx_desc *next_to_watch; u16 length; - u16 next_to_watch; u16 mapped_as_page; }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2b6cb5ca48ee..eeae9349f78b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ -static struct pci_device_id ixgbevf_pci_tbl[] = { - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), - board_82599_vf}, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), - board_X540_vf}, - +static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, /* required last entry */ {0, } }; @@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbevf_tx_buffer *tx_buffer_info; - unsigned int i, eop, count = 0; + unsigned int i, count = 0; unsigned int total_bytes = 0, total_packets = 0; if (test_bit(__IXGBEVF_DOWN, &adapter->state)) return true; i = tx_ring->next_to_clean; - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + eop_desc = tx_buffer_info->next_to_watch; - while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < tx_ring->count)) { + do { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ - /* eop could change between read and DD-check */ - if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) - goto cont_loop; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & 
cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer_info->next_to_watch = NULL; + for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - cleaned = (i == eop); + cleaned = (tx_desc == eop_desc); skb = tx_buffer_info->skb; if (cleaned && skb) { @@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, i++; if (i == tx_ring->count) i = 0; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; } -cont_loop: - eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); - } + eop_desc = tx_buffer_info->next_to_watch; + } while (count < tx_ring->count); tx_ring->next_to_clean = i; @@ -2046,6 +2052,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; int err; /* PCI config space info */ @@ -2065,18 +2072,26 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, - "PF still in reset state, assigning new address\n"); - eth_hw_addr_random(adapter->netdev); - memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, - adapter->netdev->addr_len); + "PF still in reset state. Is the PF interface up?\n"); } else { err = hw->mac.ops.init_hw(hw); if (err) { pr_err("init_shared_code failed: %d\n", err); goto out; } - memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, - adapter->netdev->addr_len); + err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + if (err) + dev_info(&pdev->dev, "Error reading MAC address\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(&pdev->dev, "Assigning random MAC address\n"); + eth_hw_addr_random(netdev); + memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); } /* lock to protect mailbox accesses */ @@ -2425,9 +2440,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { - hw_dbg(&adapter->hw, - "Unable to allocate memory for " - "the receive descriptor ring\n"); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; goto alloc_failed; @@ -2822,8 +2834,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, } static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - unsigned int first) + struct sk_buff *skb, u32 tx_flags) { struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; @@ -2848,7 +2859,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, size, DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) goto dma_error; - tx_buffer_info->next_to_watch = i; len -= size; total -= size; @@ -2878,7 +2888,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->mapped_as_page = true; - tx_buffer_info->next_to_watch = i; len -= size; total -= size; @@ -2897,8 +2906,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, else i = i - 1; tx_ring->tx_buffer_info[i].skb = skb; - tx_ring->tx_buffer_info[first].next_to_watch = i; - tx_ring->tx_buffer_info[first].time_stamp = jiffies; return count; @@ -2907,7 +2914,6 @@ dma_error: /* clear timestamp and dma mappings for failed 
tx_buffer_info map */ tx_buffer_info->dma = 0; - tx_buffer_info->next_to_watch = 0; count--; /* clear timestamp and dma mappings for remaining portion of packet */ @@ -2924,7 +2930,8 @@ dma_error: } static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, - int count, u32 paylen, u8 hdr_len) + int count, unsigned int first, u32 paylen, + u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbevf_tx_buffer *tx_buffer_info; @@ -2975,6 +2982,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + tx_ring->tx_buffer_info[first].time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->tx_buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; } @@ -3066,15 +3083,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags |= IXGBE_TX_FLAGS_CSUM; ixgbevf_tx_queue(tx_ring, tx_flags, - ixgbevf_tx_map(tx_ring, skb, tx_flags, first), - skb->len, hdr_len); - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); + ixgbevf_tx_map(tx_ring, skb, tx_flags), + first, skb->len, hdr_len); writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 0c94557b53df..387b52635bc0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -109,7 +109,12 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) if (ret_val) return ret_val; - if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) + /* New versions of the PF may NACK the reset return message + * to indicate that no MAC address has yet been assigned for + * the VF. + */ + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; memcpy(hw->mac.perm_addr, addr, ETH_ALEN); diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..5170ecb00acc 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -23,6 +23,7 @@ config MV643XX_ETH depends on (MV64X60 || PPC32 || PLAT_ORION) && INET select INET_LRO select PHYLIB + select MVMDIO ---help--- This driver supports the gigabit ethernet MACs in the Marvell Discovery PPC/MIPS chipset family (MV643XX) and @@ -38,9 +39,7 @@ config MVMDIO interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, Dove, Armada 370 and Armada XP). - For now, this driver is only needed for the MVNETA driver - (used on Armada 370 and XP), but it could be used in the - future by the MV643XX_ETH driver. + This driver is used by the MV643XX_ETH and MVNETA drivers. config MVNETA tristate "Marvell Armada 370/XP network interface support" diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 7f63b4aac434..5c4a7765ff0e 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -2,8 +2,8 @@ # Makefile for the Marvell device drivers. 
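
Back to the ixgbevf tx rework above: next_to_watch turns from a ring index into a pointer to the EOP descriptor, and the memory-ordering duties split cleanly between producer and consumer. A compressed sketch of the protocol, assuming a hypothetical struct foo_tx_buffer; the descriptor type and DD bit are the driver's real ones:

struct foo_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;	/* NULL = nothing pending */
};

/* Producer (xmit): write every descriptor first, then publish. */
static void foo_publish(struct foo_tx_buffer *first,
			union ixgbe_adv_tx_desc *eop_desc)
{
	/* Make the descriptor writes visible before cleanup can observe
	 * a non-NULL next_to_watch (the wmb() that moved into tx_queue). */
	wmb();
	first->next_to_watch = eop_desc;
}

/* Consumer (clean_tx_irq): check the pointer, then the DD bit. */
static bool foo_tx_done(struct foo_tx_buffer *buf)
{
	union ixgbe_adv_tx_desc *eop_desc = buf->next_to_watch;

	if (!eop_desc)
		return false;
	read_barrier_depends();	/* no reads of *eop_desc before the load */
	if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
		return false;
	buf->next_to_watch = NULL;	/* prevents false hang detection */
	return true;
}

Comparing tx_desc == eop_desc in the cleanup loop also removes the old "eop could change between read and DD-check" race that the cont_loop label used to paper over.
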
# -obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVMDIO) += mvmdio.o +obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVNETA) += mvneta.o obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 6562c736a1d8..aedbd8256ad1 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -20,6 +20,8 @@ * Copyright (C) 2007-2008 Marvell Semiconductor * Lennert Buytenhek <buytenh@marvell.com> * + * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de> + * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 @@ -67,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4"; * Registers shared between all ports. */ #define PHY_ADDR 0x0000 -#define SMI_REG 0x0004 -#define SMI_BUSY 0x10000000 -#define SMI_READ_VALID 0x08000000 -#define SMI_OPCODE_READ 0x04000000 -#define SMI_OPCODE_WRITE 0x00000000 -#define ERR_INT_CAUSE 0x0080 -#define ERR_INT_SMI_DONE 0x00000010 -#define ERR_INT_MASK 0x0084 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) @@ -264,25 +258,6 @@ struct mv643xx_eth_shared_private { void __iomem *base; /* - * Points at the right SMI instance to use. - */ - struct mv643xx_eth_shared_private *smi; - - /* - * Provides access to local SMI interface. - */ - struct mii_bus *smi_bus; - - /* - * If we have access to the error interrupt pin (which is - * somewhat misnamed as it not only reflects internal errors - * but also reflects SMI completion), use that to wait for - * SMI access completion instead of polling the SMI busy bit. - */ - int err_interrupt; - wait_queue_head_t smi_busy_wait; - - /* * Per-port MBUS window access register value. 
*/ u32 win_protect; @@ -1120,97 +1095,6 @@ out_write: wrlp(mp, PORT_SERIAL_CONTROL, pscr); } -static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) -{ - struct mv643xx_eth_shared_private *msp = dev_id; - - if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { - writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); - wake_up(&msp->smi_busy_wait); - return IRQ_HANDLED; - } - - return IRQ_NONE; -} - -static int smi_is_done(struct mv643xx_eth_shared_private *msp) -{ - return !(readl(msp->base + SMI_REG) & SMI_BUSY); -} - -static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) -{ - if (msp->err_interrupt == NO_IRQ) { - int i; - - for (i = 0; !smi_is_done(msp); i++) { - if (i == 10) - return -ETIMEDOUT; - msleep(10); - } - - return 0; - } - - if (!smi_is_done(msp)) { - wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), - msecs_to_jiffies(100)); - if (!smi_is_done(msp)) - return -ETIMEDOUT; - } - - return 0; -} - -static int smi_bus_read(struct mii_bus *bus, int addr, int reg) -{ - struct mv643xx_eth_shared_private *msp = bus->priv; - void __iomem *smi_reg = msp->base + SMI_REG; - int ret; - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - ret = readl(smi_reg); - if (!(ret & SMI_READ_VALID)) { - pr_warn("SMI bus read not valid\n"); - return -ENODEV; - } - - return ret & 0xffff; -} - -static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) -{ - struct mv643xx_eth_shared_private *msp = bus->priv; - void __iomem *smi_reg = msp->base + SMI_REG; - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - writel(SMI_OPCODE_WRITE | (reg << 21) | - (addr << 16) | (val & 0xffff), smi_reg); - - if (smi_wait_ready(msp)) { - pr_warn("SMI bus busy timeout\n"); - return -ETIMEDOUT; - } - - return 0; -} - - /* statistics ***************************************************************/ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) { @@ -1523,6 +1407,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, return 0; } +static void +mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + wol->supported = 0; + wol->wolopts = 0; + if (mp->phy) + phy_ethtool_get_wol(mp->phy, wol); +} + +static int +mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int err; + + if (mp->phy == NULL) + return -EOPNOTSUPP; + + err = phy_ethtool_set_wol(mp->phy, wol); + /* Given that mv643xx_eth works without the marvell-specific PHY driver, + * this debugging hint is useful to have. + */ + if (err == -EOPNOTSUPP) + netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); + return err; +} + static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1708,6 +1620,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = { .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, .get_sset_count = mv643xx_eth_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, + .get_wol = mv643xx_eth_get_wol, + .set_wol = mv643xx_eth_set_wol, }; @@ -2656,47 +2570,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) goto out_free; /* - * Set up and register SMI bus. 
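
On the new WoL hooks above: both simply proxy to phylib, which in turn dispatches to the bound PHY driver; that is why the failure hint points at CONFIG_MARVELL_PHY. A paraphrased sketch of the phylib side (from memory, not verbatim kernel source):

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	/* Only a PHY driver implementing .set_wol (e.g. the Marvell
	 * driver) can honour the request; the genphy fallback cannot. */
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;	/* what the netdev_info() hint reacts to */
}
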
- */ - if (pd == NULL || pd->shared_smi == NULL) { - msp->smi_bus = mdiobus_alloc(); - if (msp->smi_bus == NULL) - goto out_unmap; - - msp->smi_bus->priv = msp; - msp->smi_bus->name = "mv643xx_eth smi"; - msp->smi_bus->read = smi_bus_read; - msp->smi_bus->write = smi_bus_write, - snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", - pdev->name, pdev->id); - msp->smi_bus->parent = &pdev->dev; - msp->smi_bus->phy_mask = 0xffffffff; - if (mdiobus_register(msp->smi_bus) < 0) - goto out_free_mii_bus; - msp->smi = msp; - } else { - msp->smi = platform_get_drvdata(pd->shared_smi); - } - - msp->err_interrupt = NO_IRQ; - init_waitqueue_head(&msp->smi_busy_wait); - - /* - * Check whether the error interrupt is hooked up. - */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res != NULL) { - int err; - - err = request_irq(res->start, mv643xx_eth_err_irq, - IRQF_SHARED, "mv643xx_eth", msp); - if (!err) { - writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); - msp->err_interrupt = res->start; - } - } - - /* * (Re-)program MBUS remapping windows if we are asked to. */ dram = mv_mbus_dram_info(); @@ -2711,10 +2584,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) return 0; -out_free_mii_bus: - mdiobus_free(msp->smi_bus); -out_unmap: - iounmap(msp->base); out_free: kfree(msp); out: @@ -2724,14 +2593,7 @@ out: static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; - if (pd == NULL || pd->shared_smi == NULL) { - mdiobus_unregister(msp->smi_bus); - mdiobus_free(msp->smi_bus); - } - if (msp->err_interrupt != NO_IRQ) - free_irq(msp->err_interrupt, msp); iounmap(msp->base); kfree(msp); @@ -2794,14 +2656,21 @@ static void set_params(struct mv643xx_eth_private *mp, mp->txq_count = pd->tx_queue_count ? 
: 1; } +static void mv643xx_eth_adjust_link(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + mv643xx_adjust_pscr(mp); +} + static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, int phy_addr) { - struct mii_bus *bus = mp->shared->smi->smi_bus; struct phy_device *phydev; int start; int num; int i; + char phy_id[MII_BUS_ID_SIZE + 3]; if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { start = phy_addr_get(mp) & 0x1f; @@ -2811,17 +2680,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, num = 1; } - phydev = NULL; + /* Attempt to connect to the PHY using orion-mdio */ + phydev = ERR_PTR(-ENODEV); for (i = 0; i < num; i++) { int addr = (start + i) & 0x1f; - if (bus->phy_map[addr] == NULL) - mdiobus_scan(bus, addr); + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + "orion-mdio-mii", addr); - if (phydev == NULL) { - phydev = bus->phy_map[addr]; - if (phydev != NULL) - phy_addr_set(mp, addr); + phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, + PHY_INTERFACE_MODE_GMII); + if (!IS_ERR(phydev)) { + phy_addr_set(mp, addr); + break; } } @@ -2834,8 +2705,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy_reset(mp); - phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII); - if (speed == 0) { phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; @@ -2943,11 +2812,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_set_real_num_tx_queues(dev, mp->txq_count); netif_set_real_num_rx_queues(dev, mp->rxq_count); - if (pd->phy_addr != MV643XX_ETH_PHY_NONE) + if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); - if (mp->phy != NULL) + if (IS_ERR(mp->phy)) { + err = PTR_ERR(mp->phy); + if (err == -ENODEV) + err = -EPROBE_DEFER; + goto out; + } phy_init(mp, pd->speed, pd->duplex); + } SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 77b7c80262f4..7b5158f654c2 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -24,10 +24,13 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/phy.h> -#include <linux/of_address.h> -#include <linux/of_mdio.h> +#include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/delay.h> +#include <linux/io.h> +#include <linux/of_mdio.h> +#include <linux/sched.h> +#include <linux/wait.h> #define MVMDIO_SMI_DATA_SHIFT 0 #define MVMDIO_SMI_PHY_ADDR_SHIFT 16 @@ -36,33 +39,58 @@ #define MVMDIO_SMI_WRITE_OPERATION 0 #define MVMDIO_SMI_READ_VALID BIT(27) #define MVMDIO_SMI_BUSY BIT(28) +#define MVMDIO_ERR_INT_CAUSE 0x007C +#define MVMDIO_ERR_INT_SMI_DONE 0x00000010 +#define MVMDIO_ERR_INT_MASK 0x0080 struct orion_mdio_dev { struct mutex lock; - void __iomem *smireg; + void __iomem *regs; + /* + * If we have access to the error interrupt pin (which is + * somewhat misnamed as it not only reflects internal errors + * but also reflects SMI completion), use that to wait for + * SMI access completion instead of polling the SMI busy bit. 
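
phy_scan() above stops walking a private mii_bus and instead attaches by name: PHY_ID_FMT ("%s:%02x" in <linux/phy.h>) joins the MDIO bus id and the PHY address into the string phy_connect() resolves. A standalone demo of the id being built; the bus name and address are example values:

#include <stdio.h>

#define PHY_ID_FMT "%s:%02x"	/* as defined in <linux/phy.h> */

int main(void)
{
	char phy_id[64];
	int addr = 8;	/* example PHY address, 0-31 on an MDIO bus */

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "orion-mdio-mii", addr);
	printf("%s\n", phy_id);	/* prints: orion-mdio-mii:08 */
	return 0;
}

Mapping the resulting -ENODEV to -EPROBE_DEFER in mv643xx_eth_probe (above) lets the driver core retry the probe once the orion-mdio bus has actually registered.
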
+ */ + int err_interrupt; + wait_queue_head_t smi_busy_wait; }; +static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) +{ + return !(readl(dev->regs) & MVMDIO_SMI_BUSY); +} + /* Wait for the SMI unit to be ready for another operation */ static int orion_mdio_wait_ready(struct mii_bus *bus) { struct orion_mdio_dev *dev = bus->priv; int count; - u32 val; - count = 0; - while (1) { - val = readl(dev->smireg); - if (!(val & MVMDIO_SMI_BUSY)) - break; + if (dev->err_interrupt <= 0) { + count = 0; + while (1) { + if (orion_mdio_smi_is_done(dev)) + break; - if (count > 100) { - dev_err(bus->parent, "Timeout: SMI busy for too long\n"); - return -ETIMEDOUT; - } + if (count > 100) { + dev_err(bus->parent, + "Timeout: SMI busy for too long\n"); + return -ETIMEDOUT; + } - udelay(10); - count++; + udelay(10); + count++; + } + } else { + if (!orion_mdio_smi_is_done(dev)) { + wait_event_timeout(dev->smi_busy_wait, + orion_mdio_smi_is_done(dev), + msecs_to_jiffies(100)); + if (!orion_mdio_smi_is_done(dev)) + return -ETIMEDOUT; + } } return 0; @@ -87,12 +115,12 @@ static int orion_mdio_read(struct mii_bus *bus, int mii_id, writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | MVMDIO_SMI_READ_OPERATION), - dev->smireg); + dev->regs); /* Wait for the value to become available */ count = 0; while (1) { - val = readl(dev->smireg); + val = readl(dev->regs); if (val & MVMDIO_SMI_READ_VALID) break; @@ -129,7 +157,7 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id, (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | MVMDIO_SMI_WRITE_OPERATION | (value << MVMDIO_SMI_DATA_SHIFT)), - dev->smireg); + dev->regs); mutex_unlock(&dev->lock); @@ -141,13 +169,34 @@ static int orion_mdio_reset(struct mii_bus *bus) return 0; } +static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) +{ + struct orion_mdio_dev *dev = dev_id; + + if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) & + MVMDIO_ERR_INT_SMI_DONE) { + writel(~MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_CAUSE); + wake_up(&dev->smi_busy_wait); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + static int orion_mdio_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct resource *r; struct mii_bus *bus; struct orion_mdio_dev *dev; int i, ret; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "No SMI register address given\n"); + return -ENODEV; + } + bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev)); if (!bus) { dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); @@ -172,33 +221,54 @@ static int orion_mdio_probe(struct platform_device *pdev) bus->irq[i] = PHY_POLL; dev = bus->priv; - dev->smireg = of_iomap(pdev->dev.of_node, 0); - if (!dev->smireg) { - dev_err(&pdev->dev, "No SMI register address given in DT\n"); - kfree(bus->irq); - mdiobus_free(bus); - return -ENODEV; + dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!dev->regs) { + dev_err(&pdev->dev, "Unable to remap SMI register\n"); + ret = -ENODEV; + goto out_mdio; + } + + init_waitqueue_head(&dev->smi_busy_wait); + + dev->err_interrupt = platform_get_irq(pdev, 0); + if (dev->err_interrupt != -ENXIO) { + ret = devm_request_irq(&pdev->dev, dev->err_interrupt, + orion_mdio_err_irq, + IRQF_SHARED, pdev->name, dev); + if (ret) + goto out_mdio; + + writel(MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_MASK); } mutex_init(&dev->lock); - ret = of_mdiobus_register(bus, np); + if (pdev->dev.of_node) + ret = of_mdiobus_register(bus, pdev->dev.of_node); + else + 
ret = mdiobus_register(bus); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); - iounmap(dev->smireg); - kfree(bus->irq); - mdiobus_free(bus); - return ret; + goto out_mdio; } platform_set_drvdata(pdev, bus); return 0; + +out_mdio: + kfree(bus->irq); + mdiobus_free(bus); + return ret; } static int orion_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct orion_mdio_dev *dev = bus->priv; + + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); mdiobus_unregister(bus); kfree(bus->irq); mdiobus_free(bus); @@ -225,3 +295,4 @@ module_platform_driver(orion_mdio_driver); MODULE_DESCRIPTION("Marvell MDIO interface driver"); MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:orion-mdio"); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..e48261e468f3 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp, rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, &rxq->descs_phys, GFP_KERNEL); - if (rxq->descs == NULL) { - netdev_err(pp->dev, - "rxq=%d: Can't allocate %d bytes for %d RX descr\n", - rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE, - rxq->size); + if (rxq->descs == NULL) return -ENOMEM; - } BUG_ON(rxq->descs != PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); @@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp, txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, &txq->descs_phys, GFP_KERNEL); - if (txq->descs == NULL) { - netdev_err(pp->dev, - "txQ=%d: Can't allocate %d bytes for %d TX descr\n", - txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE, - txq->size); + if (txq->descs == NULL) return -ENOMEM; - } /* Make sure descriptor address is cache line size aligned */ BUG_ON(txq->descs != diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 037ed866c22f..339bb323cb0c 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep) */ if (pep->htpr == NULL) { pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, - HASH_ADDR_TABLE_SIZE, - &pep->htpr_dma, GFP_KERNEL); + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, + GFP_KERNEL | __GFP_ZERO); if (pep->htpr == NULL) return -ENOMEM; + } else { + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); } - memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); wrl(pep, HTPR, pep->htpr_dma); return 0; } @@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev) size = pep->rx_ring_size * sizeof(struct rx_desc); pep->rx_desc_area_size = size; pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, - &pep->rx_desc_dma, GFP_KERNEL); - if (!pep->p_rx_desc_area) { - printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", - dev->name, size); + &pep->rx_desc_dma, + GFP_KERNEL | __GFP_ZERO); + if (!pep->p_rx_desc_area) goto out; - } - memset((void *)pep->p_rx_desc_area, 0, size); + /* initialize the next_desc_ptr links in the Rx descriptors ring */ p_rx_desc = pep->p_rx_desc_area; for (i = 0; i < rx_desc_num; i++) { @@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev) size = pep->tx_ring_size * sizeof(struct tx_desc); pep->tx_desc_area_size = size; pep->p_tx_desc_area = 
dma_alloc_coherent(pep->dev->dev.parent, size, - &pep->tx_desc_dma, GFP_KERNEL); - if (!pep->p_tx_desc_area) { - printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", - dev->name, size); + &pep->tx_desc_dma, + GFP_KERNEL | __GFP_ZERO); + if (!pep->p_tx_desc_area) goto out; - } - memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); /* Initialize the next_desc_ptr links in the Tx descriptors ring */ p_tx_desc = pep->p_tx_desc_area; for (i = 0; i < tx_desc_num; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index fdc5f23d8e9f..05267d716e86 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1837,10 +1837,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev) priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, &priv->mfunc.vhcr_dma, GFP_KERNEL); - if (!priv->mfunc.vhcr) { - mlx4_err(dev, "Couldn't allocate VHCR.\n"); + if (!priv->mfunc.vhcr) goto err_hcr; - } } priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef714..61b56781f7a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1931,79 +1931,6 @@ static int mlx4_en_set_features(struct net_device *netdev, } -static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 flags) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; - - /* Hardware does not support aging addresses, allow only - * permanent addresses if ndm_state is given - */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Add FDB only supports static addresses\n"); - return -EINVAL; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_add_excl(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_add_excl(dev, addr); - else - err = -EINVAL; - - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; -} - -static int mlx4_en_fdb_del(struct ndmsg *ndm, - struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - int err; - - if (!mlx4_is_mfunc(mdev)) - return -EOPNOTSUPP; - - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - en_info(priv, "Del FDB only supports static addresses\n"); - return -EINVAL; - } - - if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - - return err; -} - -static int mlx4_en_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, int idx) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_dev *mdev = priv->mdev->dev; - - if (mlx4_is_mfunc(mdev)) - idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); - - return idx; -} - static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2025,9 +1952,6 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif - .ndo_fdb_add = mlx4_en_fdb_add, - 
.ndo_fdb_del = mlx4_en_fdb_del, - .ndo_fdb_dump = mlx4_en_fdb_dump, }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 3488c6d9e6b5..2448f0d669e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) /* build the pkt before xmit */ skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); - if (!skb) { - en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n"); + if (!skb) return -ENOMEM; - } + skb_reserve(skb, NET_IP_ALIGN); ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index 07a6ebc47c92..b6c60fdef4ff 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1622,25 +1622,7 @@ static struct platform_driver ks8695_driver = { .resume = ks8695_drv_resume, }; -/* Module interface */ - -static int __init -ks8695_init(void) -{ - printk(KERN_INFO "%s Ethernet driver, V%s\n", - MODULENAME, MODULEVERSION); - - return platform_driver_register(&ks8695_driver); -} - -static void __exit -ks8695_cleanup(void) -{ - platform_driver_unregister(&ks8695_driver); -} - -module_init(ks8695_init); -module_exit(ks8695_cleanup); +module_platform_driver(ks8695_driver); MODULE_AUTHOR("Simtec Electronics"); MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index a343066f7b43..ddaf138ce0d4 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev) frame_hdr = ks->frame_head_info; while (ks->frame_cnt--) { + if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) || + frame_hdr->len >= RX_BUF_SIZE || + frame_hdr->len <= 0)) { + + /* discard an invalid packet */ + ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); + netdev->stats.rx_dropped++; + if (!(frame_hdr->sts & RXFSHR_RXFV)) + netdev->stats.rx_frame_errors++; + else + netdev->stats.rx_length_errors++; + frame_hdr++; + continue; + } + skb = netdev_alloc_skb(netdev, frame_hdr->len + 16); - if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) && - (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) { + if (likely(skb)) { skb_reserve(skb, 2); /* read data block including CRC 4 bytes */ ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); - skb_put(skb, frame_hdr->len); + skb_put(skb, frame_hdr->len - 4); skb->protocol = eth_type_trans(skb, netdev); netif_rx(skb); + /* exclude CRC size */ + netdev->stats.rx_bytes += frame_hdr->len - 4; + netdev->stats.rx_packets++; } else { - pr_err("%s: err:skb alloc\n", __func__); ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); - if (skb) - dev_kfree_skb_irq(skb); + netdev->stats.rx_dropped++; } frame_hdr++; } @@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw) ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); } + if (unlikely(status & IRQ_RXOI)) + ks->netdev->stats.rx_over_errors++; /* this should be the last in IRQ handler*/ ks_restore_cmd_reg(ks); return IRQ_HANDLED; @@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) { 
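		/* A note on the hunk just below (hedged): the frame is
		 * counted only on the success path. The fifo-space test
		 * above guarantees ks_write_qmu() takes the whole skb, so
		 * tx_bytes/tx_packets are bumped before dev_kfree_skb();
		 * on the NETDEV_TX_BUSY branch the counters stay untouched
		 * and the skb remains owned by the stack, which will
		 * resubmit it. */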
ks_write_qmu(ks, skb->data, skb->len); + /* add tx statistics */ + netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_packets++; dev_kfree_skb(skb); } else retv = NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 4f9937e026e5..d5ffdc8264eb 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, &ss->rx_done.bus, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (ss->rx_done.entry == NULL) goto abort; - memset(ss->rx_done.entry, 0, bytes); bytes = sizeof(*ss->fw_stats); ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, &ss->fw_stats_bus, diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index b0b361546365..c20766c2f65b 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev) /* Allocate the entire chunk of memory for the descriptors. Note that this cannot cross a 64K boundary. */ - if ((lp->descriptors = dma_alloc_coherent(lp->device, - SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), - &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { - printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", - dev_name(lp->device)); + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); + if (lp->descriptors == NULL) goto out; - } /* Now set up the pointers to point to the appropriate places */ lp->cda = lp->descriptors; diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c index 0ffde69c8d01..346a4e025c34 100644 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev) /* Allocate the entire chunk of memory for the descriptors. Note that this cannot cross a 64K boundary. */ - if ((lp->descriptors = dma_alloc_coherent(lp->device, - SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), - &lp->descriptors_laddr, GFP_KERNEL)) == NULL) { - printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", - dev_name(lp->device)); + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); + if (lp->descriptors == NULL) return -ENOMEM; - } /* Now set up the pointers to point to the appropriate places */ lp->cda = lp->descriptors; diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 46795e403467..1bd419dbda6d 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev) /* Malloc up new buffer. 
*/ new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (new_skb == NULL) { - printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); lp->stats.rx_dropped++; break; } diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 5e4748e855f6..c2e0256fe3df 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev) * We also allocate extra space for a pointer to allow freeing * this structure later on (in xtsonic_cleanup_module()). */ - lp->descriptors = - dma_alloc_coherent(lp->device, - SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), - &lp->descriptors_laddr, GFP_KERNEL); - + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); if (lp->descriptors == NULL) { - printk(KERN_ERR "%s: couldn't alloc DMA memory for " - " descriptors.\n", dev_name(lp->device)); err = -ENOMEM; goto out; } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index bfd887382e19..3371ff41bb34 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -80,6 +80,7 @@ #include <linux/slab.h> #include <linux/prefetch.h> #include <net/tcp.h> +#include <net/checksum.h> #include <asm/div64.h> #include <asm/irq.h> @@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) { struct iphdr *ip = lro->iph; struct tcphdr *tcp = lro->tcph; - __sum16 nchk; struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); /* Update L3 header */ + csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len)); ip->tot_len = htons(lro->total_len); - ip->check = 0; - nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl); - ip->check = nchk; /* Update L4 header */ tcp->ack_seq = lro->tcp_ack; diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 63e7af44366f..cb9e63831500 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev) skb = netdev_alloc_skb(ndev, len); if (unlikely(skb == NULL)) { - printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", - ndev->name); ndev->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 162da8975b05..3df8287b7452 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev) ether = netdev_priv(dev); pdev = ether->pdev; - ether->tdesc = (struct tran_pdesc *) - dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), - ðer->tdesc_phys, GFP_KERNEL); - - if (!ether->tdesc) { - dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n"); + ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), + ðer->tdesc_phys, GFP_KERNEL); + if (!ether->tdesc) return -ENOMEM; - } - - ether->rdesc = (struct recv_pdesc *) - dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), - ðer->rdesc_phys, GFP_KERNEL); + ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), + ðer->rdesc_phys, GFP_KERNEL); if (!ether->rdesc) { - dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n"); dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), - 
ether->tdesc, ether->tdesc_phys); + ether->tdesc, ether->tdesc_phys); return -ENOMEM; } @@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev) data = ether->rdesc->recv_buf[ether->cur_rx]; skb = netdev_alloc_skb(dev, length + 2); if (!skb) { - dev_err(&pdev->dev, "get skb buffer error\n"); ether->stats.rx_dropped++; return; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0b8de12bcbca..b62262cfe4d9 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5025,7 +5025,6 @@ static int nv_loopback_test(struct net_device *dev) pkt_len = ETH_DATA_LEN; tx_skb = netdev_alloc_skb(dev, pkt_len); if (!tx_skb) { - netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n"); ret = 0; goto out; } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index efa29b712d5f..89d1b0eadf3c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) dma_alloc_coherent(&pldat->pdev->dev, pldat->dma_buff_size, &dma_handle, GFP_KERNEL); - if (pldat->dma_buff_base_v == NULL) { - dev_err(&pdev->dev, "error getting DMA region.\n"); ret = -ENOMEM; goto err_out_free_irq; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 73ce7dd6b954..60eb890800ec 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, - &rx_ring->rx_buff_pool_logic, - GFP_KERNEL); - if (!rx_ring->rx_buff_pool) { - pr_err("Unable to allocate memory for the receive pool buffer\n"); + &rx_ring->rx_buff_pool_logic, + GFP_KERNEL | __GFP_ZERO); + if (!rx_ring->rx_buff_pool) return -ENOMEM; - } - memset(rx_ring->rx_buff_pool, 0, size); + rx_ring->rx_buff_pool_size = size; for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; @@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); + &tx_ring->dma, + GFP_KERNEL | __GFP_ZERO); if (!tx_ring->desc) { vfree(tx_ring->buffer_info); - pr_err("Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } - memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - + &rx_ring->dma, + GFP_KERNEL | __GFP_ZERO); if (!rx_ring->desc) { - pr_err("Unable to allocate memory for the receive descriptor ring\n"); vfree(rx_ring->buffer_info); return -ENOMEM; } - memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; for (desNo = 0; desNo < rx_ring->count; desNo++) { diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index b1cfbb75ff1e..a5f0b5da6149 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ 
b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev) ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), - &ring->buf_dma, GFP_KERNEL); + &ring->buf_dma, + GFP_KERNEL | __GFP_ZERO); if (!ring->buffers) goto out_ring_desc; - memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); - write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index a8669adecc97..0e1797295a48 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -35,6 +35,16 @@ config QLCNIC This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet devices. +config QLCNIC_SRIOV + bool "QLOGIC QLCNIC 83XX family SR-IOV Support" + depends on QLCNIC && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support for QLE83XX Converged Ethernet devices. + This allows for virtual function acceleration in virtualized + environments. + config QLGE tristate "QLogic QLGE 10Gb Ethernet Driver Support" depends on PCI diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index eb3dfdbb642b..322a36b76727 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -955,9 +955,10 @@ typedef struct nx_mac_list_s { uint8_t mac_addr[ETH_ALEN+2]; } nx_mac_list_t; -struct nx_vlan_ip_list { +struct nx_ip_list { struct list_head list; __be32 ip_addr; + bool master; }; /* @@ -1605,7 +1606,7 @@ struct netxen_adapter { struct net_device *netdev; struct pci_dev *pdev; struct list_head mac_list; - struct list_head vlan_ip_list; + struct list_head ip_list; spinlock_t tx_clean_lock; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 4782dcfde736..7692dfd4f262 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -27,6 +27,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_vlan.h> +#include <net/checksum.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" @@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter, th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); th->psh = push; th->seq = htonl(seq_number); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 501f49207da5..7867aebc05f2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data); static irqreturn_t netxen_msi_intr(int irq, void *data); static irqreturn_t netxen_msix_intr(int irq, void *data); -static void netxen_free_vlan_ip_list(struct netxen_adapter *); +static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); @@ -1450,7 
+1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); - INIT_LIST_HEAD(&adapter->vlan_ip_list); + INIT_LIST_HEAD(&adapter->ip_list); err = netxen_setup_pci_map(adapter); if (err) @@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->tx_timeout_task); - netxen_free_vlan_ip_list(adapter); + netxen_free_ip_list(adapter, false); netxen_nic_detach(adapter); nx_decr_dev_ref_cnt(adapter); @@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter) } static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { - struct nx_vlan_ip_list *cur; - struct list_head *head = &adapter->vlan_ip_list; + struct nx_ip_list *cur, *tmp_cur; - while (!list_empty(head)) { - cur = list_entry(head->next, struct nx_vlan_ip_list, list); - netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); - list_del(&cur->list); - kfree(cur); + list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { + if (master) { + if (cur->master) { + netxen_config_ipaddr(adapter, cur->ip_addr, + NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + } else { + netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } } - } -static void -netxen_list_config_vlan_ip(struct netxen_adapter *adapter, + +static bool +netxen_list_config_ip(struct netxen_adapter *adapter, struct in_ifaddr *ifa, unsigned long event) { struct net_device *dev; - struct nx_vlan_ip_list *cur, *tmp_cur; + struct nx_ip_list *cur, *tmp_cur; struct list_head *head; + bool ret = false; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; if (dev == NULL) - return; - - if (!is_vlan_dev(dev)) - return; + goto out; switch (event) { case NX_IP_UP: - list_for_each(head, &adapter->vlan_ip_list) { - cur = list_entry(head, struct nx_vlan_ip_list, list); + list_for_each(head, &adapter->ip_list) { + cur = list_entry(head, struct nx_ip_list, list); if (cur->ip_addr == ifa->ifa_address) - return; + goto out; } - cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); + cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) - return; - + goto out; + if (dev->priv_flags & IFF_802_1Q_VLAN) + dev = vlan_dev_real_dev(dev); + cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; - list_add_tail(&cur->list, &adapter->vlan_ip_list); + list_add_tail(&cur->list, &adapter->ip_list); + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); + ret = true; break; case NX_IP_DOWN: list_for_each_entry_safe(cur, tmp_cur, - &adapter->vlan_ip_list, list) { + &adapter->ip_list, list) { if (cur->ip_addr == ifa->ifa_address) { list_del(&cur->list); kfree(cur); + netxen_config_ipaddr(adapter, ifa->ifa_address, + NX_IP_DOWN); + ret = true; break; } } } +out: + return ret; } + static void netxen_config_indev_addr(struct netxen_adapter *adapter, struct net_device *dev, unsigned long event) @@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter, for_ifa(indev) { switch (event) { case NETDEV_UP: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); + netxen_list_config_ip(adapter, ifa, NX_IP_UP); break; case NETDEV_DOWN: - netxen_config_ipaddr(adapter, - ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); + netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); 
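
Both LRO paths above (s2io and netxen) drop the zero-then-ip_fast_csum() dance in favour of csum_replace2(), i.e. the RFC 1624 incremental update for a single 16-bit header field. A self-contained demo of the arithmetic with toy values; csum_fold32() and csum_replace2_demo() are local stand-ins for the kernel helpers:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit ones'-complement sum down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624: HC' = ~(~HC + ~m + m'), which is what csum_replace2() computes. */
static uint16_t csum_replace2_demo(uint16_t check, uint16_t old, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new_val;
	return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
	/* toy "header": word[1] plays the role of ip->tot_len */
	uint16_t h[4] = { 0x4500, 0x003c, 0x1c46, 0x4000 };
	uint32_t sum = 0;
	uint16_t incr, full;
	int i;

	for (i = 0; i < 4; i++)
		sum += h[i];
	incr = (uint16_t)~csum_fold32(sum);	/* checksum before the change */

	incr = csum_replace2_demo(incr, h[1], 0x0028);
	h[1] = 0x0028;				/* LRO rewrites tot_len */

	for (sum = 0, i = 0; i < 4; i++)	/* full recompute to verify */
		sum += h[i];
	full = (uint16_t)~csum_fold32(sum);

	printf("incremental 0x%04x, recomputed 0x%04x\n", incr, full);
	return incr != full;			/* 0 on success */
}

The win is that only the changed field (tot_len) is touched, instead of re-summing the whole header the way ip_fast_csum() did.
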
break; default: break; @@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct nx_vlan_ip_list *pos, *tmp_pos; + struct nx_ip_list *pos, *tmp_pos; unsigned long ip_event; ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; netxen_config_indev_addr(adapter, netdev, event); - list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { + list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); } } +static inline bool +netxen_config_checkdev(struct net_device *dev) +{ + struct netxen_adapter *adapter; + + if (!is_netxen_netdev(dev)) + return false; + adapter = netdev_priv(dev); + if (!adapter) + return false; + if (!netxen_destip_supported(adapter)) + return false; + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return false; + + return true; +} + +/** + * netxen_config_master - configure addresses based on master + * @dev: netxen device + * @event: netdev event + */ +static void netxen_config_master(struct net_device *dev, unsigned long event) +{ + struct net_device *master, *slave; + struct netxen_adapter *adapter = netdev_priv(dev); + + rcu_read_lock(); + master = netdev_master_upper_dev_get_rcu(dev); + /* + * This is the case where the netxen nic is being + * enslaved and is dev_open()ed in bond_enslave() + * Now we should program the bond's (and its vlans') + * addresses in the netxen NIC. + */ + if (master && netif_is_bond_master(master) && + !netif_is_bond_slave(dev)) { + netxen_config_indev_addr(adapter, master, event); + for_each_netdev_rcu(&init_net, slave) + if (slave->priv_flags & IFF_802_1Q_VLAN && + vlan_dev_real_dev(slave) == master) + netxen_config_indev_addr(adapter, slave, event); + } + rcu_read_unlock(); + /* + * This is the case where the netxen nic is being + * released and is dev_close()ed in bond_release() + * just before IFF_BONDING is stripped. 
+ */ + if (!master && dev->priv_flags & IFF_BONDING) + netxen_free_ip_list(adapter, true); +} + static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev = (struct net_device *)ptr; struct net_device *orig_dev = dev; + struct net_device *slave; recheck: if (dev == NULL) @@ -3257,19 +3323,28 @@ recheck: dev = vlan_dev_real_dev(dev); goto recheck; } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - netxen_config_indev_addr(adapter, orig_dev, event); + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_config_indev_addr(adapter, + orig_dev, event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + /* Act only if the actual netxen is the target */ + if (orig_dev == dev) + netxen_config_master(dev, event); + netxen_config_indev_addr(adapter, orig_dev, event); + } + } done: return NOTIFY_DONE; } @@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; - struct net_device *dev; - + struct net_device *dev, *slave; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + unsigned long ip_event; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; - + ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; recheck: if (dev == NULL) goto done; @@ -3293,31 +3368,24 @@ recheck: dev = vlan_dev_real_dev(dev); goto recheck; } - - if (!is_netxen_netdev(dev)) - goto done; - - adapter = netdev_priv(dev); - - if (!adapter || !netxen_destip_supported(adapter)) - goto done; - - if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) - goto done; - - switch (event) { - case NETDEV_UP: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); - break; - case NETDEV_DOWN: - netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); - netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN); - break; - default: - break; + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_list_config_ip(adapter, ifa, ip_event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + netxen_list_config_ip(adapter, ifa, ip_event); + } } - done: return NOTIFY_DONE; } @@ -3334,7 +3402,7 @@ static void netxen_restore_indev_addr(struct net_device *dev, unsigned long event) { } static void -netxen_free_vlan_ip_list(struct netxen_adapter *adapter) +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { } #endif diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 8fd38cb6d26a..91a8fcd6c246 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 
qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { - netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n"); qdev->lrg_buf_skb_check++; } else { /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile index 7722a203e388..4b1fb3faa3b7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/Makefile +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -8,4 +8,6 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ - qlcnic_minidump.o + qlcnic_minidump.o qlcnic_sriov_common.o + +qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index ba3c72fce1f2..e5af69df36e2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,9 +37,9 @@ #include "qlcnic_83xx_hw.h" #define _QLCNIC_LINUX_MAJOR 5 -#define _QLCNIC_LINUX_MINOR 1 -#define _QLCNIC_LINUX_SUBVERSION 35 -#define QLCNIC_LINUX_VERSIONID "5.1.35" +#define _QLCNIC_LINUX_MINOR 2 +#define _QLCNIC_LINUX_SUBVERSION 39 +#define QLCNIC_LINUX_VERSIONID "5.2.39" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -449,6 +449,7 @@ struct qlcnic_hardware_context { struct qlc_83xx_idc idc; struct qlc_83xx_fw_info fw_info; struct qlcnic_intrpt_config *intr_tbl; + struct qlcnic_sriov *sriov; u32 *reg_tbl; u32 *ext_reg_tbl; u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; @@ -896,6 +897,7 @@ struct qlcnic_ipaddr { #define QLCNIC_FW_RESET_OWNER 0x2000 #define QLCNIC_FW_HANG 0x4000 #define QLCNIC_FW_LRO_MSS_CAP 0x8000 +#define QLCNIC_TX_INTR_SHARED 0x10000 #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) @@ -914,7 +916,9 @@ struct qlcnic_ipaddr { #define __QLCNIC_AER 5 #define __QLCNIC_DIAG_RES_ALLOC 6 #define __QLCNIC_LED_ENABLE 7 -#define __QLCNIC_ELB_INPROGRESS 8 +#define __QLCNIC_ELB_INPROGRESS 8 +#define __QLCNIC_SRIOV_ENABLE 10 +#define __QLCNIC_SRIOV_CAPABLE 11 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -1009,6 +1013,7 @@ struct qlcnic_adapter { struct qlcnic_filter_hash fhash; struct qlcnic_filter_hash rx_fhash; + struct list_head vf_mc_list; spinlock_t tx_clean_lock; spinlock_t mac_learn_lock; @@ -1051,7 +1056,11 @@ struct qlcnic_info_le { u8 total_pf; u8 total_rss_engines; __le16 max_vports; - u8 reserved2[64]; + __le16 linkstate_reg_offset; + __le16 bit_offsets; + __le16 max_local_ipv6_addrs; + __le16 max_remote_ipv6_addrs; + u8 reserved2[56]; } __packed; struct qlcnic_info { @@ -1083,6 +1092,10 @@ struct qlcnic_info { u8 total_pf; u8 total_rss_engines; u16 max_vports; + u16 linkstate_reg_offset; + u16 bit_offsets; + u16 max_local_ipv6_addrs; + u16 max_remote_ipv6_addrs; }; struct qlcnic_pci_info_le { @@ -1348,6 +1361,7 @@ struct _cdrp_cmd { struct qlcnic_cmd_args { struct _cdrp_cmd req; struct _cdrp_cmd rsp; + int op_type; }; int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); @@ -1430,6 +1444,7 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); void qlcnic_set_multi(struct net_device *netdev); +void __qlcnic_set_multi(struct net_device *netdev); int qlcnic_nic_add_mac(struct qlcnic_adapter *, 
const u8 *); int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); @@ -1511,6 +1526,12 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *); int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, __le16); +int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); +int qlcnic_read_mac_addr(struct qlcnic_adapter *); +int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); +void qlcnic_sriov_vf_schedule_multi(struct net_device *); +void qlcnic_vf_add_mc_list(struct net_device *); + /* * QLOGIC Board information */ @@ -1567,6 +1588,9 @@ struct qlcnic_hardware_ops { int (*create_rx_ctx) (struct qlcnic_adapter *); int (*create_tx_ctx) (struct qlcnic_adapter *, struct qlcnic_host_tx_ring *, int); + void (*del_rx_ctx) (struct qlcnic_adapter *); + void (*del_tx_ctx) (struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); int (*setup_link_event) (struct qlcnic_adapter *, int); int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8); int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *); @@ -1635,7 +1659,10 @@ static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { - return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); + if (adapter->ahw->hw_ops->mbx_cmd) + return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); + + return -EIO; } static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter) @@ -1655,12 +1682,14 @@ static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter) static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter) { - adapter->ahw->hw_ops->add_sysfs(adapter); + if (adapter->ahw->hw_ops->add_sysfs) + adapter->ahw->hw_ops->add_sysfs(adapter); } static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter) { - adapter->ahw->hw_ops->remove_sysfs(adapter); + if (adapter->ahw->hw_ops->remove_sysfs) + adapter->ahw->hw_ops->remove_sysfs(adapter); } static inline void @@ -1681,6 +1710,17 @@ static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring); } +static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->del_rx_ctx(adapter); +} + +static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *ptr) +{ + return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr); +} + static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable) { @@ -1778,12 +1818,14 @@ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { - adapter->nic_ops->request_reset(adapter, key); + if (adapter->nic_ops->request_reset) + adapter->nic_ops->request_reset(adapter, key); } static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter) { - adapter->nic_ops->cancel_idc_work(adapter); + if (adapter->nic_ops->cancel_idc_work) + adapter->nic_ops->cancel_idc_work(adapter); } static inline irqreturn_t @@ -1830,7 +1872,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; } while (0) #define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 + static inline bool 
qlcnic_82xx_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; @@ -1840,8 +1884,23 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; - return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false; + bool status; + + status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; + + return status; +} + +static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter) +{ + return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false; } +static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; +} #endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..374fa8a3791b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -6,6 +6,7 @@ */ #include "qlcnic.h" +#include "qlcnic_sriov.h" #include <linux/if_vlan.h> #include <linux/ipv6.h> #include <linux/ethtool.h> @@ -13,100 +14,7 @@ #define QLCNIC_MAX_TX_QUEUES 1 #define RSS_HASHTYPE_IP_TCP 0x3 - -/* status descriptor mailbox data - * @phy_addr: physical address of buffer - * @sds_ring_size: buffer size - * @intrpt_id: interrupt id - * @intrpt_val: source of interrupt - */ -struct qlcnic_sds_mbx { - u64 phy_addr; - u8 rsvd1[16]; - u16 sds_ring_size; - u16 rsvd2[3]; - u16 intrpt_id; - u8 intrpt_val; - u8 rsvd3[5]; -} __packed; - -/* receive descriptor buffer data - * phy_addr_reg: physical address of regular buffer - * phy_addr_jmb: physical address of jumbo buffer - * reg_ring_sz: size of regular buffer - * reg_ring_len: no. of entries in regular buffer - * jmb_ring_len: no. 
of entries in jumbo buffer - * jmb_ring_sz: size of jumbo buffer - */ -struct qlcnic_rds_mbx { - u64 phy_addr_reg; - u64 phy_addr_jmb; - u16 reg_ring_sz; - u16 reg_ring_len; - u16 jmb_ring_sz; - u16 jmb_ring_len; -} __packed; - -/* host producers for regular and jumbo rings */ -struct __host_producer_mbx { - u32 reg_buf; - u32 jmb_buf; -} __packed; - -/* Receive context mailbox data outbox registers - * @state: state of the context - * @vport_id: virtual port id - * @context_id: receive context id - * @num_pci_func: number of pci functions of the port - * @phy_port: physical port id - */ -struct qlcnic_rcv_mbx_out { - u8 rcv_num; - u8 sts_num; - u16 ctx_id; - u8 state; - u8 num_pci_func; - u8 phy_port; - u8 vport_id; - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; -} __packed; - -struct qlcnic_add_rings_mbx_out { - u8 rcv_num; - u8 sts_num; - u16 ctx_id; - u32 host_csmr[QLCNIC_MAX_RING_SETS]; - struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; -} __packed; - -/* Transmit context mailbox inbox registers - * @phys_addr: DMA address of the transmit buffer - * @cnsmr_index: host consumer index - * @size: legth of transmit buffer ring - * @intr_id: interrput id - * @src: src of interrupt - */ -struct qlcnic_tx_mbx { - u64 phys_addr; - u64 cnsmr_index; - u16 size; - u16 intr_id; - u8 src; - u8 rsvd[3]; -} __packed; - -/* Transmit context mailbox outbox registers - * @host_prod: host producer index - * @ctx_id: transmit context id - * @state: state of the transmit context - */ -struct qlcnic_tx_mbx_out { - u32 host_prod; - u16 ctx_id; - u8 state; - u8 rsvd; -} __packed; +#define QLC_83XX_FW_MBX_CMD 0 static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, @@ -156,9 +64,11 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, + {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, + {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, }; -static const u32 qlcnic_83xx_ext_reg_tbl[] = { +const u32 qlcnic_83xx_ext_reg_tbl[] = { 0x38CC, /* Global Reset */ 0x38F0, /* Wildcard */ 0x38FC, /* Informant */ @@ -204,7 +114,7 @@ static const u32 qlcnic_83xx_ext_reg_tbl[] = { 0x34A4, /* QLC_83XX_ASIC_TEMP */ }; -static const u32 qlcnic_83xx_reg_tbl[] = { +const u32 qlcnic_83xx_reg_tbl[] = { 0x34A8, /* PEG_HALT_STAT1 */ 0x34AC, /* PEG_HALT_STAT2 */ 0x34B0, /* FW_HEARTBEAT */ @@ -247,6 +157,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, .create_rx_ctx = qlcnic_83xx_create_rx_ctx, .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .del_rx_ctx = qlcnic_83xx_del_rx_ctx, + .del_tx_ctx = qlcnic_83xx_del_tx_ctx, .setup_link_event = qlcnic_83xx_setup_link_event, .get_nic_info = qlcnic_83xx_get_nic_info, .get_pci_info = qlcnic_83xx_get_pci_info, @@ -355,14 +267,20 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr) num_intr)); /* account for AEN interrupt MSI-X based interrupts */ num_msix += 1; - num_msix += adapter->max_drv_tx_rings; + + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + num_msix += adapter->max_drv_tx_rings; + err = qlcnic_enable_msix(adapter, num_msix); if (err == -ENOMEM) return err; if (adapter->flags & QLCNIC_MSIX_ENABLED) num_msix = adapter->ahw->num_msix; - else + else { + if (qlcnic_sriov_vf_check(adapter)) + return -EINVAL; num_msix = 1; + } /* setup interrupt mapping table for fw */ ahw->intr_tbl = 
vzalloc(num_msix * sizeof(struct qlcnic_intrpt_config)); @@ -595,7 +513,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) { u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); - adapter->ahw->pci_func = val & 0xf; + adapter->ahw->pci_func = (val >> 24) & 0xff; } int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) @@ -707,6 +625,11 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter, ahw->fw_hal_version = 2; qlcnic_get_func_no(adapter); + if (qlcnic_sriov_vf_check(adapter)) { + qlcnic_sriov_vf_set_ops(adapter); + return; + } + /* Determine function privilege level */ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); if (op_mode == QLC_83XX_DEFAULT_OPMODE) @@ -722,6 +645,9 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter, ahw->fw_hal_version); adapter->nic_ops = &qlcnic_vf_ops; } else { + if (pci_find_ext_capability(adapter->pdev, + PCI_EXT_CAP_ID_SRIOV)) + set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state); adapter->nic_ops = &qlcnic_83xx_ops; } } @@ -755,7 +681,7 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, } /* Mailbox response for mac rcode */ -static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) +u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) { u32 fw_data; u8 mac_cmd_rcode; @@ -769,7 +695,7 @@ static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) return 1; } -static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) { u32 data; unsigned long wait_time = 0; @@ -884,6 +810,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); for (i = 0; i < size; i++) { if (type == mbx_tbl[i].cmd) { + mbx->op_type = QLC_83XX_FW_MBX_CMD; mbx->req.num = mbx_tbl[i].in_args; mbx->rsp.num = mbx_tbl[i].out_args; mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), @@ -901,10 +828,10 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); temp = adapter->ahw->fw_hal_version << 29; mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); - break; + return 0; } } - return 0; + return -EINVAL; } void qlcnic_83xx_idc_aen_work(struct work_struct *work) @@ -960,6 +887,9 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) break; case QLCNIC_MBX_TIME_EXTEND_EVENT: break; + case QLCNIC_MBX_BC_EVENT: + qlcnic_sriov_handle_bc_event(adapter, event[1]); + break; case QLCNIC_MBX_SFP_INSERT_EVENT: dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", QLCNIC_MBX_RSP(event[0])); @@ -1004,7 +934,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) @@ -1050,6 +981,32 @@ out: return err; } +void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter) +{ + int err; + u32 temp = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp); + + cmd.req.arg[1] = 
recv_ctx->context_id | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; + qlcnic_free_mbx_args(&cmd); +} + int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) { int i, err, index, sds_mbx_size, rds_mbx_size; @@ -1080,9 +1037,17 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) /* set mailbox hdr and capabilities */ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + cmd.req.arg[1] = cap; cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_rx_ctx(adapter, + &cmd.req.arg[6]); /* set up status rings, mbx 8-57/87 */ index = QLC_83XX_HOST_SDS_MBX_IDX; for (i = 0; i < num_sds; i++) { @@ -1090,7 +1055,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) sds = &recv_ctx->sds_rings[i]; sds->consumer = 0; memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); - sds_mbx.phy_addr = sds->phys_addr; + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); sds_mbx.sds_ring_size = sds->num_desc; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[i].id; @@ -1110,13 +1076,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) rds = &recv_ctx->rds_rings[0]; rds->producer = 0; memset(&rds_mbx, 0, rds_mbx_size); - rds_mbx.phy_addr_reg = rds->phys_addr; + rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr); rds_mbx.reg_ring_sz = rds->dma_size; rds_mbx.reg_ring_len = rds->num_desc; /* Jumbo ring */ rds = &recv_ctx->rds_rings[1]; rds->producer = 0; - rds_mbx.phy_addr_jmb = rds->phys_addr; + rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr); rds_mbx.jmb_ring_sz = rds->dma_size; rds_mbx.jmb_ring_len = rds->num_desc; buf = &cmd.req.arg[index]; @@ -1163,16 +1131,39 @@ out: return err; } +void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + struct qlcnic_cmd_args cmd; + u32 temp = 0; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp); + + cmd.req.arg[1] = tx_ring->ctx_id | temp; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + qlcnic_free_mbx_args(&cmd); +} + int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx, int ring) { int err; u16 msix_id; - u32 *buf, intr_mask; + u32 *buf, intr_mask, temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_tx_mbx mbx; struct qlcnic_tx_mbx_out *mbx_out; struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 msix_vector; /* Reset host resources */ tx->producer = 0; @@ -1182,13 +1173,21 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); /* setup mailbox inbox registerss */ - mbx.phys_addr = tx->phys_addr; - mbx.cnsmr_index = tx->hw_cons_phys_addr; + mbx.phys_addr_low = LSD(tx->phys_addr); + mbx.phys_addr_high = MSD(tx->phys_addr); + mbx.cnsmr_index_low = 
LSD(tx->hw_cons_phys_addr); + mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr); mbx.size = tx->num_desc; - if (adapter->flags & QLCNIC_MSIX_ENABLED) - msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; - else + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + msix_vector = adapter->max_sds_rings + ring; + else + msix_vector = adapter->max_sds_rings - 1; + msix_id = ahw->intr_tbl[msix_vector].id; + } else { msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + } + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) mbx.intr_id = msix_id; else @@ -1196,8 +1195,15 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, mbx.src = 0; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp); + cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; - cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES; + cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp; buf = &cmd.req.arg[6]; memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); /* send the mailbox command*/ @@ -1210,7 +1216,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; tx->ctx_id = mbx_out->ctx_id; - if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; tx->crb_intr_mask = ahw->pci_base0 + intr_mask; } @@ -1373,12 +1380,60 @@ mbx_err: } } +int qlcnic_83xx_set_led(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EIO, active = 1; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, + "LED test is not supported in non-privileged mode\n"); + return -EOPNOTSUPP; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) + return -EBUSY; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to set LED blink state\n"); + break; + case ETHTOOL_ID_INACTIVE: + active = 0; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to reset LED blink state\n"); + break; + + default: + return -EINVAL; + } + + if (!active || err) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + return err; +} + void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, int enable) { struct qlcnic_cmd_args cmd; int status; + if (qlcnic_sriov_vf_check(adapter)) + return; + if (enable) { qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); cmd.req.arg[1] = BIT_0 | BIT_31; @@ -1441,24 +1496,35 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) return err; } +static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_promisc(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { int 
err; - u32 temp; + u32 temp = 0; struct qlcnic_cmd_args cmd; if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); - temp = adapter->recv_ctx->context_id << 16; + qlcnic_83xx_set_interface_id_promisc(adapter, &temp); cmd.req.arg[1] = (mode ? 1 : 0) | temp; err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Promiscous mode config failed\n"); - qlcnic_free_mbx_args(&cmd); + qlcnic_free_mbx_args(&cmd); return err; } @@ -1598,21 +1664,31 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) return status; } +static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int mode) { int err; - u32 temp, temp_ip; + u32 temp = 0, temp_ip; struct qlcnic_cmd_args cmd; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); - if (mode == QLCNIC_IP_UP) { - temp = adapter->recv_ctx->context_id << 16; + qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); + + if (mode == QLCNIC_IP_UP) cmd.req.arg[1] = 1 | temp; - } else { - temp = adapter->recv_ctx->context_id << 16; + else cmd.req.arg[1] = 2 | temp; - } /* * Adapter needs IP address in network byte order. @@ -1629,6 +1705,7 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, dev_err(&adapter->netdev->dev, "could not notify %s IP 0x%x request\n", (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + qlcnic_free_mbx_args(&cmd); } @@ -1695,11 +1772,22 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) } +static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_macaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, __le16 vlan_id, u8 op) { int err; - u32 *buf; + u32 *buf, temp = 0; struct qlcnic_cmd_args cmd; struct qlcnic_macvlan_mbx mv; @@ -1709,11 +1797,17 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); if (err) return err; - cmd.req.arg[1] = op | (1 << 8) | - (adapter->recv_ctx->context_id << 16); + cmd.req.arg[1] = op | (1 << 8); + qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); + cmd.req.arg[1] |= temp; mv.vlan = le16_to_cpu(vlan_id); - memcpy(&mv.mac, addr, ETH_ALEN); + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; buf = &cmd.req.arg[2]; memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); err = qlcnic_issue_cmd(adapter, &cmd); @@ -2002,14 +2096,17 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) { int i, index, err; - bool type; u8 max_ints; - u32 val, temp; + u32 val, temp, type; struct qlcnic_cmd_args cmd; max_ints = adapter->ahw->num_msix - 1; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); cmd.req.arg[1] = max_ints; + + if 
(qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16; + for (i = 0, index = 2; i < max_ints; i++) { type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; val = type | (adapter->ahw->intr_tbl[i].type << 4); @@ -2163,7 +2260,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) +int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter) { int ret; u32 cmd; @@ -2181,7 +2278,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter) +int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter) { int ret; @@ -2255,7 +2352,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, return -EIO; if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { - ret = qlcnic_83xx_enable_flash_write_op(adapter); + ret = qlcnic_83xx_enable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, @@ -2297,7 +2394,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, } if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { - ret = qlcnic_83xx_disable_flash_write_op(adapter); + ret = qlcnic_83xx_disable_flash_write(adapter); if (ret) { qlcnic_83xx_unlock_flash(adapter); dev_err(&adapter->pdev->dev, @@ -2337,8 +2434,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, u32 temp; int ret = -EIO; - if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) || - (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) { + if ((count < QLC_83XX_FLASH_WRITE_MIN) || + (count > QLC_83XX_FLASH_WRITE_MAX)) { dev_err(&adapter->pdev->dev, "%s: Invalid word count\n", __func__); return -EIO; @@ -2616,13 +2713,19 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) { + u8 pci_func; int err; u32 config = 0, state; struct qlcnic_cmd_args cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; - state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func)); - if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) { + if (qlcnic_sriov_vf_check(adapter)) + pci_func = adapter->portnum; + else + pci_func = ahw->pci_func; + + state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func)); + if (!QLC_83xx_FUNC_VAL(state, pci_func)) { dev_info(&adapter->pdev->dev, "link state down\n"); return config; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 61f81f6c84a9..32ed4b4c4976 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -12,6 +12,8 @@ #include <linux/etherdevice.h> #include "qlcnic_hw.h" +#define QLCNIC_83XX_BAR0_LENGTH 0x4000 + /* Directly mapped registers */ #define QLC_83XX_CRB_WIN_BASE 0x3800 #define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4)) @@ -86,6 +88,153 @@ #define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 +/* status descriptor mailbox data + * @phy_addr_{low|high}: physical address of buffer + * @sds_ring_size: buffer size + * @intrpt_id: interrupt id + * @intrpt_val: source of interrupt + */ +struct qlcnic_sds_mbx { + u32 phy_addr_low; + u32 phy_addr_high; + u32 rsvd1[4]; +#if defined(__LITTLE_ENDIAN) + u16 sds_ring_size; + u16 rsvd2; + u16 rsvd3[2]; + u16 intrpt_id; + u8 intrpt_val; + u8 rsvd4; +#elif defined(__BIG_ENDIAN) + u16 
rsvd2; + u16 sds_ring_size; + u16 rsvd3[2]; + u8 rsvd4; + u8 intrpt_val; + u16 intrpt_id; +#endif + u32 rsvd5; +} __packed; + +/* receive descriptor buffer data + * phy_addr_reg_{low|high}: physical address of regular buffer + * phy_addr_jmb_{low|high}: physical address of jumbo buffer + * reg_ring_sz: size of regular buffer + * reg_ring_len: no. of entries in regular buffer + * jmb_ring_len: no. of entries in jumbo buffer + * jmb_ring_sz: size of jumbo buffer + */ +struct qlcnic_rds_mbx { + u32 phy_addr_reg_low; + u32 phy_addr_reg_high; + u32 phy_addr_jmb_low; + u32 phy_addr_jmb_high; +#if defined(__LITTLE_ENDIAN) + u16 reg_ring_sz; + u16 reg_ring_len; + u16 jmb_ring_sz; + u16 jmb_ring_len; +#elif defined(__BIG_ENDIAN) + u16 reg_ring_len; + u16 reg_ring_sz; + u16 jmb_ring_len; + u16 jmb_ring_sz; +#endif +} __packed; + +/* host producers for regular and jumbo rings */ +struct __host_producer_mbx { + u32 reg_buf; + u32 jmb_buf; +} __packed; + +/* Receive context mailbox data outbox registers + * @state: state of the context + * @vport_id: virtual port id + * @context_id: receive context id + * @num_pci_func: number of pci functions of the port + * @phy_port: physical port id + */ +struct qlcnic_rcv_mbx_out { +#if defined(__LITTLE_ENDIAN) + u8 rcv_num; + u8 sts_num; + u16 ctx_id; + u8 state; + u8 num_pci_func; + u8 phy_port; + u8 vport_id; +#elif defined(__BIG_ENDIAN) + u16 ctx_id; + u8 sts_num; + u8 rcv_num; + u8 vport_id; + u8 phy_port; + u8 num_pci_func; + u8 state; +#endif + u32 host_csmr[QLCNIC_MAX_RING_SETS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; +} __packed; + +struct qlcnic_add_rings_mbx_out { +#if defined(__LITTLE_ENDIAN) + u8 rcv_num; + u8 sts_num; + u16 ctx_id; +#elif defined(__BIG_ENDIAN) + u16 ctx_id; + u8 sts_num; + u8 rcv_num; +#endif + u32 host_csmr[QLCNIC_MAX_RING_SETS]; + struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS]; +} __packed; + +/* Transmit context mailbox inbox registers + * @phys_addr_{low|high}: DMA address of the transmit buffer + * @cnsmr_index_{low|high}: host consumer index + * @size: length of transmit buffer ring + * @intr_id: interrupt id + * @src: src of interrupt + */ +struct qlcnic_tx_mbx { + u32 phys_addr_low; + u32 phys_addr_high; + u32 cnsmr_index_low; + u32 cnsmr_index_high; +#if defined(__LITTLE_ENDIAN) + u16 size; + u16 intr_id; + u8 src; + u8 rsvd[3]; +#elif defined(__BIG_ENDIAN) + u16 intr_id; + u16 size; + u8 rsvd[3]; + u8 src; +#endif +} __packed; + +/* Transmit context mailbox outbox registers + * @host_prod: host producer index + * @ctx_id: transmit context id + * @state: state of the transmit context + */ + +struct qlcnic_tx_mbx_out { + u32 host_prod; +#if defined(__LITTLE_ENDIAN) + u16 ctx_id; + u8 state; + u8 rsvd; +#elif defined(__BIG_ENDIAN) + u8 rsvd; + u8 state; + u16 ctx_id; +#endif +} __packed; + struct qlcnic_intrpt_config { u8 type; u8 enabled; @@ -94,8 +243,23 @@ struct qlcnic_intrpt_config { }; struct qlcnic_macvlan_mbx { - u8 mac[ETH_ALEN]; +#if defined(__LITTLE_ENDIAN) + u8 mac_addr0; + u8 mac_addr1; + u8 mac_addr2; + u8 mac_addr3; + u8 mac_addr4; + u8 mac_addr5; u16 vlan; +#elif defined(__BIG_ENDIAN) + u8 mac_addr3; + u8 mac_addr2; + u8 mac_addr1; + u8 mac_addr0; + u16 vlan; + u8 mac_addr5; + u8 mac_addr4; +#endif }; struct qlc_83xx_fw_info { @@ -226,6 +390,7 @@ struct qlc_83xx_idc { #define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) #define QLC_83XX_VIRTUAL_NIC_MODE 0xFF #define QLC_83XX_DEFAULT_MODE 0x0 +#define QLC_83XX_SRIOV_MODE 0x1 #define QLCNIC_BRDTYPE_83XX_10G 0x0083
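The structures above replace the single-layout versions deleted from qlcnic_83xx_hw.c. The adapter consumes each mailbox as a sequence of 32-bit words, so sub-word fields are declared in the opposite order under __BIG_ENDIAN: that keeps the numeric value of every 32-bit word identical on either host type, and the MMIO accessors then take care of bus byte order. A standalone sketch of the trick (userspace C using glibc's <endian.h>; the kernel instead tests defined(__LITTLE_ENDIAN) via asm/byteorder.h):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mbx_word {
#if __BYTE_ORDER == __LITTLE_ENDIAN
	uint16_t size;		/* bits 15:0 of the 32-bit mailbox word */
	uint16_t intr_id;	/* bits 31:16 */
#else
	uint16_t intr_id;	/* swapped so the word value stays the same */
	uint16_t size;
#endif
} __attribute__((packed));

int main(void)
{
	struct mbx_word w = { .size = 0x1234, .intr_id = 0xabcd };
	uint32_t word;

	memcpy(&word, &w, sizeof(word));
	/* Prints abcd1234 on little- and big-endian hosts alike. */
	printf("%08x\n", (unsigned int)word);
	return 0;
}

Keeping the swap inside the struct definition, rather than byte-swapping at every use, is what lets the driver memcpy() a whole mailbox struct into the command buffer unchanged.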
#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010 @@ -242,8 +407,8 @@ struct qlc_83xx_idc { #define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca #define QLC_83XX_FLASH_READ_RETRY_COUNT 5000 #define QLC_83XX_FLASH_STATUS_READY 0x6 -#define QLC_83XX_FLASH_BULK_WRITE_MIN 2 -#define QLC_83XX_FLASH_BULK_WRITE_MAX 64 +#define QLC_83XX_FLASH_WRITE_MIN 2 +#define QLC_83XX_FLASH_WRITE_MAX 64 #define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1 #define QLC_83XX_ERASE_MODE 1 #define QLC_83XX_WRITE_MODE 2 @@ -351,6 +516,9 @@ int qlcnic_ind_rd(struct qlcnic_adapter *, u32); int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, struct qlcnic_host_tx_ring *, int); +void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *); +void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int); void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); @@ -401,7 +569,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *); int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); -int qlcnic_83xx_init(struct qlcnic_adapter *); +int qlcnic_83xx_init(struct qlcnic_adapter *, int); int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); @@ -434,5 +602,10 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); int qlcnic_83xx_loopback_test(struct net_device *, u8); int qlcnic_83xx_interrupt_test(struct net_device *); +int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state); int qlcnic_83xx_flash_test(struct qlcnic_adapter *); +int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); +int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); +u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 5c033f268ca5..c302d118a0d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -5,6 +5,7 @@ * See LICENSE.qlcnic for copyright and licensing details. */ +#include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_hw.h" @@ -25,12 +26,12 @@ #define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); -static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); /* Template header */ struct qlc_83xx_reset_hdr { +#if defined(__LITTLE_ENDIAN) u16 version; u16 signature; u16 size; @@ -39,14 +40,31 @@ struct qlc_83xx_reset_hdr { u16 checksum; u16 init_offset; u16 start_offset; +#elif defined(__BIG_ENDIAN) + u16 signature; + u16 version; + u16 entries; + u16 size; + u16 checksum; + u16 hdr_size; + u16 start_offset; + u16 init_offset; +#endif } __packed; /* Command entry header. 
*/ struct qlc_83xx_entry_hdr { - u16 cmd; - u16 size; - u16 count; - u16 delay; +#if defined(__LITTLE_ENDIAN) + u16 cmd; + u16 size; + u16 count; + u16 delay; +#elif defined(__BIG_ENDIAN) + u16 size; + u16 cmd; + u16 delay; + u16 count; +#endif } __packed; /* Generic poll command */ @@ -60,10 +78,17 @@ struct qlc_83xx_rmw { u32 mask; u32 xor_value; u32 or_value; +#if defined(__LITTLE_ENDIAN) u8 shl; u8 shr; u8 index_a; u8 rsvd; +#elif defined(__BIG_ENDIAN) + u8 rsvd; + u8 index_a; + u8 shr; + u8 shl; +#endif } __packed; /* Generic command with 2 DWORD */ @@ -1893,6 +1918,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter) qlcnic_get_func_no(adapter); op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); + if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) + op_mode = QLC_83XX_DEFAULT_OPMODE; + if (op_mode == QLC_83XX_DEFAULT_OPMODE) { adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; @@ -1922,6 +1950,16 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) ahw->max_mac_filters = nic_info.max_mac_filters; ahw->max_mtu = nic_info.max_mtu; + /* VNIC mode is detected by BIT_23 in capabilities. This bit is also + * set in case device is SRIOV capable. VNIC and SRIOV are mutually + * exclusive. So in case of sriov capable device load driver in + * default mode + */ + if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) { + ahw->nic_mode = QLC_83XX_DEFAULT_MODE; + return ahw->nic_mode; + } + if (ahw->capabilities & BIT_23) ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; else @@ -1930,7 +1968,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) return ahw->nic_mode; } -static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) +int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) { int ret; @@ -2008,10 +2046,13 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) } } -int qlcnic_83xx_init(struct qlcnic_adapter *adapter) +int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) { struct qlcnic_hardware_context *ahw = adapter->ahw; + if (qlcnic_sriov_vf_check(adapter)) + return qlcnic_sriov_vf_init(adapter, pci_using_dac); + if (qlcnic_83xx_check_hw_status(adapter)) return -EIO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index a69097c6b84d..43562c256379 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -382,8 +382,7 @@ out_free_rq: return err; } -static void -qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter) +void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) { int err; struct qlcnic_cmd_args cmd; @@ -422,22 +421,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, - &rq_phys_addr, GFP_KERNEL); + &rq_phys_addr, GFP_KERNEL | __GFP_ZERO); if (!rq_addr) return -ENOMEM; rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, - &rsp_phys_addr, GFP_KERNEL); + &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO); if (!rsp_addr) { err = -ENOMEM; goto out_free_rq; } - memset(rq_addr, 0, rq_size); prq = rq_addr; - memset(rsp_addr, 0, rsp_size); prsp = rsp_addr; prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); @@ -486,13 +483,13 @@ out_free_rq: return err; } 
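The conversion just above repeats throughout this series: dma_alloc_coherent() gains GFP_KERNEL | __GFP_ZERO so the trailing memset() of the request/response buffers can be dropped, and the per-call out-of-memory messages go away because the allocation paths already warn on failure. A minimal sketch of the resulting idiom, with an illustrative name (alloc_cmd_buf is not a driver helper); note that later kernels zero coherent DMA memory unconditionally:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Request a zeroed, DMA-coherent buffer in one step. No memset() and
 * no dev_err() on failure: __GFP_ZERO clears the memory, and the mm
 * core already logs failed allocation attempts itself. */
static void *alloc_cmd_buf(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}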
-static void -qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter, - struct qlcnic_host_tx_ring *tx_ring) +void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + cmd.req.arg[1] = tx_ring->ctx_id; if (qlcnic_issue_cmd(adapter, &cmd)) dev_err(&adapter->pdev->dev, @@ -532,20 +529,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - - if (ptr == NULL) { - dev_err(&pdev->dev, "failed to allocate tx consumer\n"); + if (ptr == NULL) return -ENOMEM; - } + tx_ring->hw_consumer = ptr; /* cmd desc ring */ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), &tx_ring->phys_addr, GFP_KERNEL); - if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate tx desc ring\n"); err = -ENOMEM; goto err_out_free; } @@ -556,11 +548,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, - RCV_DESC_RINGSIZE(rds_ring), - &rds_ring->phys_addr, GFP_KERNEL); + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate rds ring [%d]\n", ring); err = -ENOMEM; goto err_out_free; } @@ -572,11 +562,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) sds_ring = &recv_ctx->sds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, - STATUS_DESC_RINGSIZE(sds_ring), - &sds_ring->phys_addr, GFP_KERNEL); + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { - dev_err(&pdev->dev, - "failed to allocate sds ring [%d]\n", ring); err = -ENOMEM; goto err_out_free; } @@ -616,13 +604,12 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) &dev->tx_ring[ring], ring); if (err) { - qlcnic_fw_cmd_destroy_rx_ctx(dev); + qlcnic_fw_cmd_del_rx_ctx(dev); if (ring == 0) goto err_out; for (i = 0; i < ring; i++) - qlcnic_fw_cmd_destroy_tx_ctx(dev, - &dev->tx_ring[i]); + qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]); goto err_out; } @@ -644,10 +631,10 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) int ring; if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { - qlcnic_fw_cmd_destroy_rx_ctx(adapter); + qlcnic_fw_cmd_del_rx_ctx(adapter); for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) - qlcnic_fw_cmd_destroy_tx_ctx(adapter, - &adapter->tx_ring[ring]); + qlcnic_fw_cmd_del_tx_ctx(adapter, + &adapter->tx_ring[ring]); if (qlcnic_83xx_check(adapter) && (adapter->flags & QLCNIC_MSIX_ENABLED)) { @@ -655,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_83xx_config_intrpt(adapter, 0); } /* Allow dma queues to drain after context reset */ - mdelay(20); + msleep(20); } } @@ -753,10 +740,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, size_t nic_size = sizeof(struct qlcnic_info_le); nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); + &nic_dma_t, GFP_KERNEL | __GFP_ZERO); if (!nic_info_addr) return -ENOMEM; - memset(nic_info_addr, 0, nic_size); nic_info = nic_info_addr; @@ -804,11 +790,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, return err; nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, - &nic_dma_t, GFP_KERNEL); + &nic_dma_t, 
GFP_KERNEL | __GFP_ZERO); if (!nic_info_addr) return -ENOMEM; - memset(nic_info_addr, 0, nic_size); nic_info = nic_info_addr; nic_info->pci_func = cpu_to_le16(nic->pci_func); @@ -854,10 +839,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, - &pci_info_dma_t, GFP_KERNEL); + &pci_info_dma_t, + GFP_KERNEL | __GFP_ZERO); if (!pci_info_addr) return -ENOMEM; - memset(pci_info_addr, 0, pci_size); npar = pci_info_addr; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); @@ -949,12 +934,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, } stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); - if (!stats_addr) { - dev_err(&adapter->pdev->dev, "Unable to allocate memory\n"); + &stats_dma_t, GFP_KERNEL | __GFP_ZERO); + if (!stats_addr) return -ENOMEM; - } - memset(stats_addr, 0, stats_size); arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 |= rx_tx << 15 | stats_size << 16; @@ -1003,13 +985,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, return -ENOMEM; stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, - &stats_dma_t, GFP_KERNEL); - if (!stats_addr) { - dev_err(&adapter->pdev->dev, - "%s: Unable to allocate memory.\n", __func__); + &stats_dma_t, GFP_KERNEL | __GFP_ZERO); + if (!stats_addr) return -ENOMEM; - } - memset(stats_addr, 0, stats_size); + qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); cmd.req.arg[1] = stats_size << 16; cmd.req.arg[2] = MSD(stats_dma_t); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 5641f8ec49ab..f4f279d5cba4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -149,7 +149,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { static inline int qlcnic_82xx_statistics(void) { - return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + return ARRAY_SIZE(qlcnic_device_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); } static inline int qlcnic_83xx_statistics(void) @@ -1070,8 +1071,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static void -qlcnic_fill_stats(u64 *data, void *stats, int type) +static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) { if (type == QLCNIC_MAC_STATS) { struct qlcnic_mac_statistics *mac_stats = @@ -1120,6 +1120,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type) *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); } + return data; } static void qlcnic_get_ethtool_stats(struct net_device *dev, @@ -1147,7 +1148,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, /* Retrieve MAC statistics from firmware */ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); qlcnic_get_mac_stats(adapter, &mac_stats); - qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); + data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); } if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) @@ -1159,7 +1160,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev, if (ret) return; - qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); + data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, 
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); if (ret) @@ -1176,7 +1177,8 @@ static int qlcnic_set_led(struct net_device *dev, int err = -EIO, active = 1; if (qlcnic_83xx_check(adapter)) - return -EOPNOTSUPP; + return qlcnic_83xx_set_led(dev, state); + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { netdev_warn(dev, "LED test not supported for non " "privilege function\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h index 44197ca1456c..1cebd8900cf9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -714,7 +714,9 @@ enum { QLCNIC_MGMT_FUNC = 0, QLCNIC_PRIV_FUNC = 1, QLCNIC_NON_PRIV_FUNC = 2, - QLCNIC_UNKNOWN_FUNC_MODE = 3 + QLCNIC_SRIOV_PF_FUNC = 3, + QLCNIC_SRIOV_VF_FUNC = 4, + QLCNIC_UNKNOWN_FUNC_MODE = 5 }; enum { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index f89cc7a3fe6c..ddc130b23378 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -496,7 +496,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr) return 0; } -void qlcnic_set_multi(struct net_device *netdev) +void __qlcnic_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; @@ -508,7 +508,8 @@ void qlcnic_set_multi(struct net_device *netdev) if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) return; - qlcnic_nic_add_mac(adapter, adapter->mac_addr); + if (!qlcnic_sriov_vf_check(adapter)) + qlcnic_nic_add_mac(adapter, adapter->mac_addr); qlcnic_nic_add_mac(adapter, bcast_addr); if (netdev->flags & IFF_PROMISC) { @@ -523,23 +524,53 @@ void qlcnic_set_multi(struct net_device *netdev) goto send_fw_cmd; } - if (!netdev_mc_empty(netdev)) { + if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) { netdev_for_each_mc_addr(ha, netdev) { qlcnic_nic_add_mac(adapter, ha->addr); } } + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_vf_add_mc_list(netdev); + send_fw_cmd: - if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { - qlcnic_alloc_lb_filters_mem(adapter); - adapter->drv_mac_learn = true; - } else { - adapter->drv_mac_learn = false; + if (!qlcnic_sriov_vf_check(adapter)) { + if (mode == VPORT_MISS_MODE_ACCEPT_ALL && + !adapter->fdb_mac_learn) { + qlcnic_alloc_lb_filters_mem(adapter); + adapter->drv_mac_learn = true; + } else { + adapter->drv_mac_learn = false; + } } qlcnic_nic_set_promisc(adapter, mode); } +void qlcnic_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + struct qlcnic_mac_list_s *cur; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + if (qlcnic_sriov_vf_check(adapter)) { + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) { + cur = kzalloc(sizeof(struct qlcnic_mac_list_s), + GFP_ATOMIC); + memcpy(cur->mac_addr, + ha->addr, ETH_ALEN); + list_add_tail(&cur->list, &adapter->vf_mc_list); + } + } + qlcnic_sriov_vf_schedule_multi(adapter->netdev); + return; + } + __qlcnic_set_multi(netdev); +} + int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) { struct qlcnic_nic_req req; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 5b8749eda11f..e862a77a626b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -83,6 +83,8 @@ enum qlcnic_regs { #define QLCNIC_CMD_CONFIG_PORT 0x2e #define QLCNIC_CMD_TEMP_SIZE 0x2f #define QLCNIC_CMD_GET_TEMP_HDR 0x30 +#define QLCNIC_CMD_BC_EVENT_SETUP 0x31 +#define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_GET_MAC_STATS 0x37 #define QLCNIC_CMD_SET_DRV_VER 0x38 #define QLCNIC_CMD_CONFIGURE_RSS 0x41 @@ -114,6 +116,7 @@ enum qlcnic_regs { #define QLCNIC_SET_FAC_DEF_MAC 5 #define QLCNIC_MBX_LINK_EVENT 0x8001 +#define QLCNIC_MBX_BC_EVENT 0x8002 #define QLCNIC_MBX_COMP_EVENT 0x8100 #define QLCNIC_MBX_REQUEST_EVENT 0x8101 #define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 @@ -175,6 +178,9 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, struct qlcnic_host_tx_ring *tx_ring, int); +void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *); +void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..a85ca63a2c9e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -9,6 +9,7 @@ #include <linux/if_vlan.h> #include <net/ip.h> #include <linux/ipv6.h> +#include <net/checksum.h> #include "qlcnic.h" @@ -146,7 +147,10 @@ static inline u8 qlcnic_mac_hash(u64 mac) static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, u16 handle, u8 ring_id) { - if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) + unsigned short device = adapter->pdev->device; + + if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) return handle | (ring_id << 15); else return handle; @@ -1132,9 +1136,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } th->psh = push; @@ -1595,9 +1598,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, iph = (struct iphdr *)skb->data; th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); - iph->check = 0; - iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } th->psh = push; @@ -1692,6 +1694,29 @@ skip: return count; } +static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) +{ + int tx_complete; + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + /* tx ring count = 1 */ + tx_ring = adapter->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if ((work_done < budget) && tx_complete) { + napi_complete(&sds_ring->napi); + 
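+ /* Under budget with tx drained: napi_complete() takes this ring
+ * off the poll list, so its interrupt must be re-armed right away
+ * by the enable_intr call that follows. */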
qlcnic_83xx_enable_intr(adapter, sds_ring); + } + + return work_done; +} + static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) { int tx_complete; @@ -1769,7 +1794,8 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) qlcnic_83xx_enable_intr(adapter, sds_ring); } - if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; napi_enable(&tx_ring->napi); @@ -1796,7 +1822,8 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) napi_disable(&sds_ring->napi); } - if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; qlcnic_83xx_disable_tx_intr(adapter, tx_ring); @@ -1809,7 +1836,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev) { - int ring, max_sds_rings; + int ring, max_sds_rings, temp; struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; @@ -1820,14 +1847,23 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, max_sds_rings = adapter->max_sds_rings; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; - if (adapter->flags & QLCNIC_MSIX_ENABLED) - netif_napi_add(netdev, &sds_ring->napi, - qlcnic_83xx_rx_poll, - QLCNIC_NETDEV_WEIGHT * 2); - else + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_rx_poll, + QLCNIC_NETDEV_WEIGHT * 2); + } else { + temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings; + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_msix_sriov_vf_poll, + temp); + } + + } else { netif_napi_add(netdev, &sds_ring->napi, qlcnic_83xx_poll, QLCNIC_NETDEV_WEIGHT / max_sds_rings); + } } if (qlcnic_alloc_tx_rings(adapter, netdev)) { @@ -1835,7 +1871,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, return -ENOMEM; } - if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_add(netdev, &tx_ring->napi, @@ -1861,7 +1898,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter) qlcnic_free_sds_rings(adapter->recv_ctx); - if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; netif_napi_del(&tx_ring->napi); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 28a6d4838364..3ee593ee13cf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include "qlcnic.h" +#include "qlcnic_sriov.h" #include "qlcnic_hw.h" #include <linux/swab.h> @@ -109,6 +110,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), + 
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), {0,} }; @@ -198,8 +200,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) recv_ctx->sds_rings = NULL; } -static int -qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) +int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) { u8 mac_addr[ETH_ALEN]; struct net_device *netdev = adapter->netdev; @@ -225,6 +226,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; + if (qlcnic_sriov_vf_check(adapter)) + return -EINVAL; + if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) return -EOPNOTSUPP; @@ -253,11 +257,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return err; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_del(ndm, tb, netdev, addr); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { if (is_unicast_ether_addr(addr)) @@ -277,11 +278,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return -EOPNOTSUPP; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags); if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { pr_info("%s: FDB e-switch is not enabled\n", __func__); @@ -306,11 +304,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb, { struct qlcnic_adapter *adapter = netdev_priv(netdev); - if (!adapter->fdb_mac_learn) { - pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", - __func__); - return -EOPNOTSUPP; - } + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_dump(skb, ncb, netdev, idx); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); @@ -387,6 +382,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag, .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx, .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx, + .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx, + .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx, .setup_link_event = qlcnic_82xx_linkevent_request, .get_nic_info = qlcnic_82xx_get_nic_info, .get_pci_info = qlcnic_82xx_get_pci_info, @@ -408,7 +405,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) { struct pci_dev *pdev = adapter->pdev; int err = -1, i; - int max_tx_rings; + int max_tx_rings, tx_vector; + + if (adapter->flags & QLCNIC_TX_INTR_SHARED) { + max_tx_rings = 0; + tx_vector = 0; + } else { + max_tx_rings = adapter->max_drv_tx_rings; + tx_vector = 1; + } if (!adapter->msix_entries) { adapter->msix_entries = kcalloc(num_msix, @@ -431,7 +436,6 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) if (qlcnic_83xx_check(adapter)) { adapter->ahw->num_msix = num_msix; /* subtract mail box and tx ring vectors */ - max_tx_rings = adapter->max_drv_tx_rings; adapter->max_sds_rings = num_msix - max_tx_rings - 1; } else { @@ -444,11 +448,11 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) "Unable to allocate %d MSI-X interrupt vectors\n", num_msix); if (qlcnic_83xx_check(adapter)) { - if (err < QLC_83XX_MINIMUM_VECTOR) + if (err < (QLC_83XX_MINIMUM_VECTOR - 
tx_vector)) return err; - err -= (adapter->max_drv_tx_rings + 1); + err -= (max_tx_rings + 1); num_msix = rounddown_pow_of_two(err); - num_msix += (adapter->max_drv_tx_rings + 1); + num_msix += (max_tx_rings + 1); } else { num_msix = rounddown_pow_of_two(err); } @@ -721,6 +725,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) *bar = QLCNIC_82XX_BAR0_LENGTH; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: *bar = QLCNIC_83XX_BAR0_LENGTH; break; default: @@ -751,7 +756,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev, return -EIO; } - dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10)); ahw->pci_base0 = mem_ptr0; ahw->pci_len0 = pci_len0; @@ -1292,7 +1297,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) } } if (qlcnic_83xx_check(adapter) && - (adapter->flags & QLCNIC_MSIX_ENABLED)) { + (adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { handler = qlcnic_msix_tx_intr; for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { @@ -1328,7 +1334,8 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter) free_irq(sds_ring->irq, sds_ring); } } - if (qlcnic_83xx_check(adapter)) { + if (qlcnic_83xx_check(adapter) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; @@ -1418,9 +1425,12 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) return; + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); smp_mb(); spin_lock(&adapter->tx_clean_lock); netif_carrier_off(netdev); + adapter->ahw->linkup = 0; netif_tx_disable(netdev); qlcnic_free_mac_list(adapter); @@ -1685,7 +1695,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter) return err; } -static int +int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int pci_using_dac) { @@ -1820,6 +1830,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u32 capab2; char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ + if (pdev->is_virtfn) + return -ENODEV; + err = pci_enable_device(pdev); if (err) return err; @@ -1844,12 +1857,18 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!ahw) goto err_out_free_res; - if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) { + switch (ent->device) { + case PCI_DEVICE_ID_QLOGIC_QLE824X: ahw->hw_ops = &qlcnic_hw_ops; - ahw->reg_tbl = (u32 *)qlcnic_reg_tbl; - } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { + ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; + break; + case PCI_DEVICE_ID_QLOGIC_QLE834X: qlcnic_83xx_register_map(ahw); - } else { + break; + case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + qlcnic_sriov_vf_register_map(ahw); + break; + default: goto err_out_free_hw_res; } @@ -1911,11 +1930,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; - err = qlcnic_83xx_init(adapter); + err = qlcnic_83xx_init(adapter, pci_using_dac); if (err) { dev_err(&pdev->dev, "%s: failed\n", __func__); goto err_out_free_hw; } + if (qlcnic_sriov_vf_check(adapter)) + return 0; } else { dev_err(&pdev->dev, "%s: failed. 
Please Reboot\n", __func__); @@ -1932,6 +1953,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) module_name(THIS_MODULE), board_name, adapter->ahw->revision_id); } + + if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && + !!qlcnic_use_msi) + dev_warn(&pdev->dev, + "83xx adapters do not support MSI interrupts\n"); + err = qlcnic_setup_intr(adapter, 0); if (err) { dev_err(&pdev->dev, "Failed to setup interrupt\n"); @@ -2024,11 +2051,13 @@ static void qlcnic_remove(struct pci_dev *pdev) return; netdev = adapter->netdev; + qlcnic_sriov_pf_disable(adapter); qlcnic_cancel_idc_work(adapter); ahw = adapter->ahw; unregister_netdev(netdev); + qlcnic_sriov_cleanup(adapter); if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_free_mbx_intr(adapter); @@ -3432,7 +3461,10 @@ static struct pci_driver qlcnic_driver = { .resume = qlcnic_resume, #endif .shutdown = qlcnic_shutdown, - .err_handler = &qlcnic_err_handler + .err_handler = &qlcnic_err_handler, +#ifdef CONFIG_QLCNIC_SRIOV + .sriov_configure = qlcnic_pci_sriov_configure, +#endif }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index abbd22c814a6..4b9bab18ebd9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter, tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, &tmp_addr_t, GFP_KERNEL); - if (!tmp_addr) { - dev_err(&adapter->pdev->dev, - "Can't get memory for FW dump template\n"); + if (!tmp_addr) return -ENOMEM; - } if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { err = -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h new file mode 100644 index 000000000000..b476ebac2439 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -0,0 +1,214 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details.
+ */ + +#ifndef _QLCNIC_83XX_SRIOV_H_ +#define _QLCNIC_83XX_SRIOV_H_ + +#include "qlcnic.h" +#include <linux/types.h> +#include <linux/pci.h> + +extern const u32 qlcnic_83xx_reg_tbl[]; +extern const u32 qlcnic_83xx_ext_reg_tbl[]; + +struct qlcnic_bc_payload { + u64 payload[126]; +}; + +struct qlcnic_bc_hdr { +#if defined(__LITTLE_ENDIAN) + u8 version; + u8 msg_type:4; + u8 rsvd1:3; + u8 op_type:1; + u8 num_cmds; + u8 num_frags; + u8 frag_num; + u8 cmd_op; + u16 seq_id; + u64 rsvd3; +#elif defined(__BIG_ENDIAN) + u8 num_frags; + u8 num_cmds; + u8 op_type:1; + u8 rsvd1:3; + u8 msg_type:4; + u8 version; + u16 seq_id; + u8 cmd_op; + u8 frag_num; + u64 rsvd3; +#endif +}; + +enum qlcnic_bc_commands { + QLCNIC_BC_CMD_CHANNEL_INIT = 0x0, + QLCNIC_BC_CMD_CHANNEL_TERM = 0x1, +}; + +#define QLC_BC_CMD 1 + +struct qlcnic_trans_list { + /* Lock for manipulating list */ + spinlock_t lock; + struct list_head wait_list; + int count; +}; + +enum qlcnic_trans_state { + QLC_INIT = 0, + QLC_WAIT_FOR_CHANNEL_FREE, + QLC_WAIT_FOR_RESP, + QLC_ABORT, + QLC_END, +}; + +struct qlcnic_bc_trans { + u8 func_id; + u8 active; + u8 curr_rsp_frag; + u8 curr_req_frag; + u16 cmd_id; + u16 req_pay_size; + u16 rsp_pay_size; + u32 trans_id; + enum qlcnic_trans_state trans_state; + struct list_head list; + struct qlcnic_bc_hdr *req_hdr; + struct qlcnic_bc_hdr *rsp_hdr; + struct qlcnic_bc_payload *req_pay; + struct qlcnic_bc_payload *rsp_pay; + struct completion resp_cmpl; + struct qlcnic_vf_info *vf; +}; + +enum qlcnic_vf_state { + QLC_BC_VF_SEND = 0, + QLC_BC_VF_RECV, + QLC_BC_VF_CHANNEL, + QLC_BC_VF_STATE, +}; + +struct qlcnic_resources { + u16 num_tx_mac_filters; + u16 num_rx_ucast_mac_filters; + u16 num_rx_mcast_mac_filters; + + u16 num_txvlan_keys; + + u16 num_rx_queues; + u16 num_tx_queues; + + u16 num_rx_buf_rings; + u16 num_rx_status_rings; + + u16 num_destip; + u32 num_lro_flows_supported; + u16 max_local_ipv6_addrs; + u16 max_remote_ipv6_addrs; +}; + +struct qlcnic_vport { + u16 handle; + u8 mac[6]; +}; + +struct qlcnic_vf_info { + u8 pci_func; + u16 rx_ctx_id; + u16 tx_ctx_id; + unsigned long state; + struct completion ch_free_cmpl; + struct work_struct trans_work; + /* It synchronizes commands sent from VF */ + struct mutex send_cmd_lock; + struct qlcnic_bc_trans *send_cmd; + struct qlcnic_trans_list rcv_act; + struct qlcnic_trans_list rcv_pend; + struct qlcnic_adapter *adapter; + struct qlcnic_vport *vp; +}; + +struct qlcnic_async_work_list { + struct list_head list; + struct work_struct work; + void *ptr; +}; + +struct qlcnic_back_channel { + u16 trans_counter; + struct workqueue_struct *bc_trans_wq; + struct workqueue_struct *bc_async_wq; + struct list_head async_list; +}; + +struct qlcnic_sriov { + u16 vp_handle; + u8 num_vfs; + struct qlcnic_resources ff_max; + struct qlcnic_back_channel bc; + struct qlcnic_vf_info *vf_info; +}; + +int qlcnic_sriov_init(struct qlcnic_adapter *, int); +void qlcnic_sriov_cleanup(struct qlcnic_adapter *); +void __qlcnic_sriov_cleanup(struct qlcnic_adapter *); +void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *); +int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int); +void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *); +int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8); +int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8); +void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32); +int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8); +void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *); + +static inline bool 
qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false; +} + +#ifdef CONFIG_QLCNIC_SRIOV +void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *, + struct qlcnic_bc_trans *, + struct qlcnic_cmd_args *); +void qlcnic_sriov_pf_disable(struct qlcnic_adapter *); +void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *); +int qlcnic_pci_sriov_configure(struct pci_dev *, int); +void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *); +#else +static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {} +static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {} +static inline void +qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id) +{} +static inline void +qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id) +{} +static inline void +qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id) +{} +#endif + +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c new file mode 100644 index 000000000000..14e9ebd3b73a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -0,0 +1,1297 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic_sriov.h" +#include "qlcnic.h" +#include "qlcnic_83xx_hw.h" +#include <linux/types.h> + +#define QLC_BC_COMMAND 0 +#define QLC_BC_RESPONSE 1 + +#define QLC_MBOX_RESP_TIMEOUT (10 * HZ) +#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ) + +#define QLC_BC_MSG 0 +#define QLC_BC_CFREE 1 +#define QLC_BC_HDR_SZ 16 +#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ) + +#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048 +#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512 + +static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, + struct qlcnic_cmd_args *); + +static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { + .read_crb = qlcnic_83xx_read_crb, + .write_crb = qlcnic_83xx_write_crb, + .read_reg = qlcnic_83xx_rd_reg_indirect, + .write_reg = qlcnic_83xx_wrt_reg_indirect, + .get_mac_address = qlcnic_83xx_get_mac_address, + .setup_intr = qlcnic_83xx_setup_intr, + .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, + .mbx_cmd = qlcnic_sriov_vf_mbx_op, + .get_func_no = qlcnic_83xx_get_func_no, + .api_lock = qlcnic_83xx_cam_lock, + .api_unlock = qlcnic_83xx_cam_unlock, + .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_83xx_create_rx_ctx, + .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .del_rx_ctx = qlcnic_83xx_del_rx_ctx, + .del_tx_ctx = qlcnic_83xx_del_tx_ctx, + .setup_link_event = qlcnic_83xx_setup_link_event, + .get_nic_info = qlcnic_83xx_get_nic_info, + .get_pci_info = qlcnic_83xx_get_pci_info, + .set_nic_info = qlcnic_83xx_set_nic_info, + .change_macvlan = qlcnic_83xx_sre_macaddr_change, + .napi_enable = qlcnic_83xx_napi_enable, + .napi_disable = qlcnic_83xx_napi_disable, + .config_intr_coal = qlcnic_83xx_config_intr_coal, + .config_rss = qlcnic_83xx_config_rss, + .config_hw_lro = qlcnic_83xx_config_hw_lro, + .config_promisc_mode = qlcnic_83xx_nic_set_promisc, + .change_l2_filter = qlcnic_83xx_change_l2_filter, + .get_board_info = qlcnic_83xx_get_port_info, +}; + +static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .cancel_idc_work = qlcnic_83xx_idc_exit, + .napi_add = qlcnic_83xx_napi_add, + .napi_del = qlcnic_83xx_napi_del, + .config_ipaddr = qlcnic_83xx_config_ipaddr, + .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, +}; + +static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = { + {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2}, + {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2}, +}; + +static inline bool qlcnic_sriov_bc_msg_check(u32 val) +{ + return (val & (1 << QLC_BC_MSG)) ? true : false; +} + +static inline bool qlcnic_sriov_channel_free_check(u32 val) +{ + return (val & (1 << QLC_BC_CFREE)) ? 
true : false; +} + +static inline u8 qlcnic_sriov_target_func_id(u32 val) +{ + return (val >> 4) & 0xff; +} + +static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) +{ + struct pci_dev *dev = adapter->pdev; + int pos; + u16 stride, offset; + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); + + return (dev->devfn + offset + stride * vf_id) & 0xff; +} + +int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) +{ + struct qlcnic_sriov *sriov; + struct qlcnic_back_channel *bc; + struct workqueue_struct *wq; + struct qlcnic_vport *vp; + struct qlcnic_vf_info *vf; + int err, i; + + if (!qlcnic_sriov_enable_check(adapter)) + return -EIO; + + sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL); + if (!sriov) + return -ENOMEM; + + adapter->ahw->sriov = sriov; + sriov->num_vfs = num_vfs; + bc = &sriov->bc; + sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) * + num_vfs, GFP_KERNEL); + if (!sriov->vf_info) { + err = -ENOMEM; + goto qlcnic_free_sriov; + } + + wq = create_singlethread_workqueue("bc-trans"); + if (wq == NULL) { + err = -ENOMEM; + dev_err(&adapter->pdev->dev, + "Cannot create bc-trans workqueue\n"); + goto qlcnic_free_vf_info; + } + + bc->bc_trans_wq = wq; + + wq = create_singlethread_workqueue("async"); + if (wq == NULL) { + err = -ENOMEM; + dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n"); + goto qlcnic_destroy_trans_wq; + } + + bc->bc_async_wq = wq; + INIT_LIST_HEAD(&bc->async_list); + + for (i = 0; i < num_vfs; i++) { + vf = &sriov->vf_info[i]; + vf->adapter = adapter; + vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); + mutex_init(&vf->send_cmd_lock); + INIT_LIST_HEAD(&vf->rcv_act.wait_list); + INIT_LIST_HEAD(&vf->rcv_pend.wait_list); + spin_lock_init(&vf->rcv_act.lock); + spin_lock_init(&vf->rcv_pend.lock); + init_completion(&vf->ch_free_cmpl); + + if (qlcnic_sriov_pf_check(adapter)) { + vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); + if (!vp) { + err = -ENOMEM; + goto qlcnic_destroy_async_wq; + } + sriov->vf_info[i].vp = vp; + random_ether_addr(vp->mac); + dev_info(&adapter->pdev->dev, + "MAC Address %pM is configured for VF %d\n", + vp->mac, i); + } + } + + return 0; + +qlcnic_destroy_async_wq: + destroy_workqueue(bc->bc_async_wq); + +qlcnic_destroy_trans_wq: + destroy_workqueue(bc->bc_trans_wq); + +qlcnic_free_vf_info: + kfree(sriov->vf_info); + +qlcnic_free_sriov: + kfree(adapter->ahw->sriov); + return err; +} + +void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_back_channel *bc = &sriov->bc; + int i; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + qlcnic_sriov_cleanup_async_list(bc); + destroy_workqueue(bc->bc_async_wq); + destroy_workqueue(bc->bc_trans_wq); + + for (i = 0; i < sriov->num_vfs; i++) + kfree(sriov->vf_info[i].vp); + + kfree(sriov->vf_info); + kfree(adapter->ahw->sriov); +} + +static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter) +{ + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + __qlcnic_sriov_cleanup(adapter); +} + +void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) +{ + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_sriov_pf_cleanup(adapter); + + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_sriov_vf_cleanup(adapter); +} + +static 
int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, + u32 *pay, u8 pci_func, u8 size) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + unsigned long flags; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val; + u16 opcode; + u8 mbx_err_code; + int i, j; + + opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; + + if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + dev_info(&adapter->pdev->dev, + "Mailbox cmd attempted, 0x%x\n", opcode); + dev_info(&adapter->pdev->dev, "Mailbox detached\n"); + return 0; + } + + spin_lock_irqsave(&ahw->mbx_lock, flags); + + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (mbx_val) { + QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode); + spin_unlock_irqrestore(&ahw->mbx_lock, flags); + return QLCNIC_RCODE_TIMEOUT; + } + /* Fill in mailbox registers */ + val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29); + + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + mbx_cmd = 0x1 | (1 << 4); + + if (qlcnic_sriov_pf_check(adapter)) + mbx_cmd |= (pci_func << 5); + + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); + for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + i++, j++) { + writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i)); + } + for (j = 0; j < size; j++, i++) + writel(*(pay++), QLCNIC_MBX_HOST(ahw, i)); + + /* Signal FW about the impending command */ + QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); + + /* Wait for the mailbox command to complete; while we wait, AENs + * may arrive. If more than 5 seconds expire we can assume + * something is wrong. + */ +poll: + rsp = qlcnic_83xx_mbx_poll(adapter); + if (rsp != QLCNIC_RCODE_TIMEOUT) { + /* Get the FW response data */ + fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); + if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { + qlcnic_83xx_process_aen(adapter); + mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (mbx_val) + goto poll; + } + mbx_err_code = QLCNIC_MBX_STATUS(fw_data); + rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); + opcode = QLCNIC_MBX_RSP(fw_data); + + switch (mbx_err_code) { + case QLCNIC_MBX_RSP_OK: + case QLCNIC_MBX_PORT_RSP_OK: + rsp = QLCNIC_RCODE_SUCCESS; + break; + default: + if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) { + rsp = qlcnic_83xx_mac_rcode(adapter); + if (!rsp) + goto out; + } + dev_err(&adapter->pdev->dev, + "MBX command 0x%x failed with err:0x%x\n", + opcode, mbx_err_code); + rsp = mbx_err_code; + break; + } + goto out; + } + + dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n", + QLCNIC_MBX_RSP(mbx_cmd)); + rsp = QLCNIC_RCODE_TIMEOUT; +out: + /* clear fw mbx control register */ + QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); + spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags); + return rsp; +} + +static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) +{ + adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) +{ + struct qlcnic_info nic_info; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err; + + err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); + if (err) + return -EIO; + + if (qlcnic_83xx_get_port_info(adapter)) + return -EIO; + + qlcnic_sriov_vf_cfg_buff_desc(adapter);
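
qlcnic_sriov_post_bc_msg() above packs the first host mailbox register from three fields: the opcode in the low bits (0x31, which the qlcnic_hw.h hunk names QLCNIC_CMD_BC_EVENT_SETUP), the total dword count (four header dwords plus the payload) at bit 16, and the firmware HAL version at bit 29. A small sketch of that encoding; the field boundaries are inferred from the shifts in the hunk above, not from a documented register layout:

#include <stdint.h>

#define QLC_BC_HDR_DWORDS 4	/* sizeof(struct qlcnic_bc_hdr) / sizeof(u32) */

/* Build the first mailbox command word the way the hunk above does. */
static uint32_t qlc_bc_mbx_word0(uint16_t opcode, uint32_t pay_dwords,
				 uint32_t hal_version)
{
	uint32_t count = pay_dwords + QLC_BC_HDR_DWORDS;

	return opcode | (count << 16) | (hal_version << 29);
}

/* Example: opcode 0x31, 8 payload dwords, HAL version 2
 * -> 0x31 | (12 << 16) | (2 << 29) == 0x400c0031
 */

The second register is built the same way: bit 0 and bit 4 are set unconditionally (presumably valid-command and back-channel flags), and on the PF the destination VF's pci_func lands at bit 5.
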
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + dev_info(&adapter->pdev->dev, "HAL Version: %d\n", + adapter->ahw->fw_hal_version); + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_mtu = nic_info.max_mtu; + ahw->op_mode = nic_info.op_mode; + ahw->capabilities = nic_info.capabilities; + return 0; +} + +static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, + int pci_using_dac) +{ + int err; + + INIT_LIST_HEAD(&adapter->vf_mc_list); + if (!qlcnic_use_msi_x && !!qlcnic_use_msi) + dev_warn(&adapter->pdev->dev, + "83xx adapters do not support MSI interrupts\n"); + + err = qlcnic_setup_intr(adapter, 1); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); + goto err_out_disable_msi; + } + + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto err_out_disable_msi; + + err = qlcnic_sriov_init(adapter, 1); + if (err) + goto err_out_disable_mbx_intr; + + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + goto err_out_cleanup_sriov; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (err) + goto err_out_disable_bc_intr; + + err = qlcnic_sriov_vf_init_driver(adapter); + if (err) + goto err_out_send_channel_term; + + err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); + if (err) + goto err_out_send_channel_term; + + pci_set_drvdata(adapter->pdev, adapter); + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + return 0; + +err_out_send_channel_term: + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + +err_out_disable_bc_intr: + qlcnic_sriov_cfg_bc_intr(adapter, 0); + +err_out_cleanup_sriov: + __qlcnic_sriov_cleanup(adapter); + +err_out_disable_mbx_intr: + qlcnic_83xx_free_mbx_intr(adapter); + +err_out_disable_msi: + qlcnic_teardown_intr(adapter); + return err; +} + +int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + spin_lock_init(&ahw->mbx_lock); + set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); + ahw->msix_supported = 1; + adapter->flags |= QLCNIC_TX_INTR_SHARED; + + if (qlcnic_sriov_setup_vf(adapter, pci_using_dac)) + return -EIO; + + if (qlcnic_read_mac_addr(adapter)) + dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); + + set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + adapter->ahw->reset_context = 0; + adapter->fw_fail_cnt = 0; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + adapter->need_fw_reset = 0; + return 0; +} + +void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + ahw->op_mode = QLCNIC_SRIOV_VF_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged SRIOV function\n", + ahw->fw_hal_version); + adapter->nic_ops = &qlcnic_sriov_vf_ops; + set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + return; +} + +void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw) +{ + ahw->hw_ops = &qlcnic_sriov_vf_hw_ops; + ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; + ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; +} + +static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag) +{ + u32 pay_size; + + pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ); + + if (pay_size) + pay_size = QLC_BC_PAYLOAD_SZ; + else + pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ; + + return pay_size; +} + +int
qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func) +{ + struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info; + u8 i; + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + + for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) { + if (vf_info[i].pci_func == pci_func) + return i; + } + + return -EINVAL; +} + +static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans) +{ + *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC); + if (!*trans) + return -ENOMEM; + + init_completion(&(*trans)->resp_cmpl); + return 0; +} + +static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr, + u32 size) +{ + *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + + return 0; +} + +static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) +{ + const struct qlcnic_mailbox_metadata *mbx_tbl; + int i, size; + + mbx_tbl = qlcnic_sriov_bc_mbx_tbl; + size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl); + + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->op_type = QLC_BC_CMD; + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + mbx->req.arg[0] = (type | (mbx->req.num << 16) | + (3 << 29)); + return 0; + } + } + return -EINVAL; +} + +static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd, + u16 seq, u8 msg_type) +{ + struct qlcnic_bc_hdr *hdr; + int i; + u32 num_regs, bc_pay_sz; + u16 remainder; + u8 cmd_op, num_frags, t_num_frags; + + bc_pay_sz = QLC_BC_PAYLOAD_SZ; + if (msg_type == QLC_BC_COMMAND) { + trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg; + trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg; + num_regs = cmd->req.num; + trans->req_pay_size = (num_regs * 4); + num_regs = cmd->rsp.num; + trans->rsp_pay_size = (num_regs * 4); + cmd_op = cmd->req.arg[0] & 0xff; + remainder = (trans->req_pay_size) % (bc_pay_sz); + num_frags = (trans->req_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + t_num_frags = num_frags; + if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags)) + return -ENOMEM; + remainder = (trans->rsp_pay_size) % (bc_pay_sz); + num_frags = (trans->rsp_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags)) + return -ENOMEM; + num_frags = t_num_frags; + hdr = trans->req_hdr; + } else { + cmd->req.arg = (u32 *)trans->req_pay; + cmd->rsp.arg = (u32 *)trans->rsp_pay; + cmd_op = cmd->req.arg[0] & 0xff; + remainder = (trans->rsp_pay_size) % (bc_pay_sz); + num_frags = (trans->rsp_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + cmd->req.num = trans->req_pay_size / 4; + cmd->rsp.num = trans->rsp_pay_size / 4; + hdr = trans->rsp_hdr; + } + + trans->trans_id = seq; + trans->cmd_id = cmd_op; + for (i = 0; i < num_frags; i++) { + hdr[i].version = 2; + hdr[i].msg_type = msg_type; + hdr[i].op_type = cmd->op_type; + hdr[i].num_cmds = 1; + hdr[i].num_frags = num_frags; + hdr[i].frag_num = i + 1; + hdr[i].cmd_op = cmd_op; + hdr[i].seq_id = seq; + } + return 0; +} + +static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans) 
+{ + if (!trans) + return; + kfree(trans->req_hdr); + kfree(trans->rsp_hdr); + kfree(trans); +} + +static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans, u8 type) +{ + struct qlcnic_trans_list *t_list; + unsigned long flags; + int ret = 0; + + if (type == QLC_BC_RESPONSE) { + t_list = &vf->rcv_act; + spin_lock_irqsave(&t_list->lock, flags); + t_list->count--; + list_del(&trans->list); + if (t_list->count > 0) + ret = 1; + spin_unlock_irqrestore(&t_list->lock, flags); + } + if (type == QLC_BC_COMMAND) { + while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) + msleep(100); + vf->send_cmd = NULL; + clear_bit(QLC_BC_VF_SEND, &vf->state); + } + return ret; +} + +static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + work_func_t func) +{ + INIT_WORK(&vf->trans_work, func); + queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); +} + +static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans) +{ + struct completion *cmpl = &trans->resp_cmpl; + + if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT)) + trans->trans_state = QLC_END; + else + trans->trans_state = QLC_ABORT; + + return; +} + +static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans, + u8 type) +{ + if (type == QLC_BC_RESPONSE) { + trans->curr_rsp_frag++; + if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags) + trans->trans_state = QLC_INIT; + else + trans->trans_state = QLC_END; + } else { + trans->curr_req_frag++; + if (trans->curr_req_frag < trans->req_hdr->num_frags) + trans->trans_state = QLC_INIT; + else + trans->trans_state = QLC_WAIT_FOR_RESP; + } +} + +static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans, + u8 type) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct completion *cmpl = &vf->ch_free_cmpl; + + if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) { + trans->trans_state = QLC_ABORT; + return; + } + + clear_bit(QLC_BC_VF_CHANNEL, &vf->state); + qlcnic_sriov_handle_multi_frags(trans, type); +} + +static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, + u32 *hdr, u32 *pay, u32 size) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 fw_mbx; + u8 i, max = 2, hdr_size, j; + + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + max = (size / sizeof(u32)) + hdr_size; + + fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); + for (i = 2, j = 0; j < hdr_size; i++, j++) + *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); + for (; j < max; i++, j++) + *(pay++) = readl(QLCNIC_MBX_FW(ahw, i)); +} + +static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) +{ + int ret = -EBUSY; + u32 timeout = 10000; + + do { + if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) { + ret = 0; + break; + } + mdelay(1); + } while (--timeout); + + return ret; +} + +static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) +{ + struct qlcnic_vf_info *vf = trans->vf; + u32 pay_size, hdr_size; + u32 *hdr, *pay; + int ret; + u8 pci_func = trans->func_id; + + if (__qlcnic_sriov_issue_bc_post(vf)) + return -EBUSY; + + if (type == QLC_BC_COMMAND) { + hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); + pay = (u32 *)(trans->req_pay + trans->curr_req_frag); + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + trans->curr_req_frag); + pay_size = (pay_size / sizeof(u32)); + } else { + hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); + pay = (u32 *)(trans->rsp_pay + 
trans->curr_rsp_frag); + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, + trans->curr_rsp_frag); + pay_size = (pay_size / sizeof(u32)); + } + + ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay, + pci_func, pay_size); + return ret; +} + +static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans, + struct qlcnic_vf_info *vf, u8 type) +{ + int err; + bool flag = true; + + while (flag) { + switch (trans->trans_state) { + case QLC_INIT: + trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE; + if (qlcnic_sriov_issue_bc_post(trans, type)) + trans->trans_state = QLC_ABORT; + break; + case QLC_WAIT_FOR_CHANNEL_FREE: + qlcnic_sriov_wait_for_channel_free(trans, type); + break; + case QLC_WAIT_FOR_RESP: + qlcnic_sriov_wait_for_resp(trans); + break; + case QLC_END: + err = 0; + flag = false; + break; + case QLC_ABORT: + err = -EIO; + flag = false; + clear_bit(QLC_BC_VF_CHANNEL, &vf->state); + break; + default: + err = -EIO; + flag = false; + } + } + return err; +} + +static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, int pci_func) +{ + struct qlcnic_vf_info *vf; + int err, index = qlcnic_sriov_func_to_index(adapter, pci_func); + + if (index < 0) + return -EIO; + + vf = &adapter->ahw->sriov->vf_info[index]; + trans->vf = vf; + trans->func_id = pci_func; + + if (!test_bit(QLC_BC_VF_STATE, &vf->state)) { + if (qlcnic_sriov_pf_check(adapter)) + return -EIO; + if (qlcnic_sriov_vf_check(adapter) && + trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT) + return -EIO; + } + + mutex_lock(&vf->send_cmd_lock); + vf->send_cmd = trans; + err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND); + qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND); + mutex_unlock(&vf->send_cmd_lock); + return err; +} + +static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ +#ifdef CONFIG_QLCNIC_SRIOV + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd); + return; + } +#endif + cmd->rsp.arg[0] |= (0x9 << 25); + return; +} + +static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) +{ + struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info, + trans_work); + struct qlcnic_bc_trans *trans = NULL; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_cmd_args cmd; + u8 req; + + trans = list_first_entry(&vf->rcv_act.wait_list, + struct qlcnic_bc_trans, list); + adapter = vf->adapter; + + if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id, + QLC_BC_RESPONSE)) + goto cleanup_trans; + + __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd); + trans->trans_state = QLC_INIT; + __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE); + +cleanup_trans: + qlcnic_free_mbx_args(&cmd); + req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE); + qlcnic_sriov_cleanup_transaction(trans); + if (req) + qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf, + qlcnic_sriov_process_bc_cmd); +} + +static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_trans *trans; + u32 pay_size; + + if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) + return; + + trans = vf->send_cmd; + + if (trans == NULL) + goto clear_send; + + if (trans->trans_id != hdr->seq_id) + goto clear_send; + + pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, + trans->curr_rsp_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, 
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag), + (u32 *)(trans->rsp_pay + trans->curr_rsp_frag), + pay_size); + if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags) + goto clear_send; + + complete(&trans->resp_cmpl); + +clear_send: + clear_bit(QLC_BC_VF_SEND, &vf->state); +} + +static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans) +{ + struct qlcnic_trans_list *t_list = &vf->rcv_act; + + spin_lock(&t_list->lock); + t_list->count++; + list_add_tail(&trans->list, &t_list->wait_list); + if (t_list->count == 1) + qlcnic_sriov_schedule_bc_cmd(sriov, vf, + qlcnic_sriov_process_bc_cmd); + spin_unlock(&t_list->lock); + return 0; +} + +static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_hdr *hdr) +{ + struct qlcnic_bc_trans *trans = NULL; + struct list_head *node; + u32 pay_size, curr_frag; + u8 found = 0, active = 0; + + spin_lock(&vf->rcv_pend.lock); + if (vf->rcv_pend.count > 0) { + list_for_each(node, &vf->rcv_pend.wait_list) { + trans = list_entry(node, struct qlcnic_bc_trans, list); + if (trans->trans_id == hdr->seq_id) { + found = 1; + break; + } + } + } + + if (found) { + curr_frag = trans->curr_req_frag; + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + curr_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, + (u32 *)(trans->req_hdr + curr_frag), + (u32 *)(trans->req_pay + curr_frag), + pay_size); + trans->curr_req_frag++; + if (trans->curr_req_frag >= hdr->num_frags) { + vf->rcv_pend.count--; + list_del(&trans->list); + active = 1; + } + } + spin_unlock(&vf->rcv_pend.lock); + + if (active) + if (qlcnic_sriov_add_act_list(sriov, vf, trans)) + qlcnic_sriov_cleanup_transaction(trans); + + return; +} + +static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, + struct qlcnic_bc_hdr *hdr, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_trans *trans; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_cmd_args cmd; + u32 pay_size; + int err; + u8 cmd_op; + + if (!test_bit(QLC_BC_VF_STATE, &vf->state) && + hdr->op_type != QLC_BC_CMD && + hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT) + return; + + if (hdr->frag_num > 1) { + qlcnic_sriov_handle_pending_trans(sriov, vf, hdr); + return; + } + + cmd_op = hdr->cmd_op; + if (qlcnic_sriov_alloc_bc_trans(&trans)) + return; + + if (hdr->op_type == QLC_BC_CMD) + err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op); + else + err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op); + + if (err) { + qlcnic_sriov_cleanup_transaction(trans); + return; + } + + cmd.op_type = hdr->op_type; + if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id, + QLC_BC_COMMAND)) { + qlcnic_free_mbx_args(&cmd); + qlcnic_sriov_cleanup_transaction(trans); + return; + } + + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + trans->curr_req_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, + (u32 *)(trans->req_hdr + trans->curr_req_frag), + (u32 *)(trans->req_pay + trans->curr_req_frag), + pay_size); + trans->func_id = vf->pci_func; + trans->vf = vf; + trans->trans_id = hdr->seq_id; + trans->curr_req_frag++; + if (trans->curr_req_frag == trans->req_hdr->num_frags) { + if (qlcnic_sriov_add_act_list(sriov, vf, trans)) { + qlcnic_free_mbx_args(&cmd); + qlcnic_sriov_cleanup_transaction(trans); + } + } else { + spin_lock(&vf->rcv_pend.lock); + list_add_tail(&trans->list, &vf->rcv_pend.wait_list); + vf->rcv_pend.count++; + spin_unlock(&vf->rcv_pend.lock); + } +} + +static void 
qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_hdr hdr; + u32 *ptr = (u32 *)&hdr; + u8 msg_type, i; + + for (i = 2; i < 6; i++) + ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i)); + msg_type = hdr.msg_type; + + switch (msg_type) { + case QLC_BC_COMMAND: + qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf); + break; + case QLC_BC_RESPONSE: + qlcnic_sriov_handle_bc_resp(&hdr, vf); + break; + } +} + +void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event) +{ + struct qlcnic_vf_info *vf; + struct qlcnic_sriov *sriov; + int index; + u8 pci_func; + + sriov = adapter->ahw->sriov; + pci_func = qlcnic_sriov_target_func_id(event); + index = qlcnic_sriov_func_to_index(adapter, pci_func); + + if (index < 0) + return; + + vf = &sriov->vf_info[index]; + vf->pci_func = pci_func; + + if (qlcnic_sriov_channel_free_check(event)) + complete(&vf->ch_free_cmpl); + + if (qlcnic_sriov_bc_msg_check(event)) + qlcnic_sriov_handle_msg_event(sriov, vf); +} + +int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) +{ + struct qlcnic_cmd_args cmd; + int err; + + if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) + return 0; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP)) + return -ENOMEM; + + if (enable) + cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); + + err = qlcnic_83xx_mbx_op(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to %s bc events, err=%d\n", + (enable ? "enable" : "disable"), err); + } + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_bc_trans *trans; + int err; + u32 rsp_data, opcode, mbx_err_code, rsp; + u16 seq = ++adapter->ahw->sriov->bc.trans_counter; + + if (qlcnic_sriov_alloc_bc_trans(&trans)) + return -ENOMEM; + + if (qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND)) { + rsp = -ENOMEM; + goto err_out; + } + + if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { + rsp = -EIO; + QLCDB(adapter, DRV, "MBX not ready! (cmd 0x%x) for VF 0x%x\n", + QLCNIC_MBX_RSP(cmd->req.arg[0]), adapter->ahw->pci_func); + goto err_out; + } + + err = qlcnic_sriov_send_bc_cmd(adapter, trans, adapter->ahw->pci_func); + if (err) { + dev_err(&adapter->pdev->dev, + "MBX command 0x%x timed out for VF %d\n", + (cmd->req.arg[0] & 0xffff), adapter->ahw->pci_func); + rsp = QLCNIC_RCODE_TIMEOUT; + goto err_out; + } + + rsp_data = cmd->rsp.arg[0]; + mbx_err_code = QLCNIC_MBX_STATUS(rsp_data); + opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]); + + if ((mbx_err_code == QLCNIC_MBX_RSP_OK) || + (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { + rsp = QLCNIC_RCODE_SUCCESS; + } else { + rsp = mbx_err_code; + if (!rsp) + rsp = 1; + dev_err(&adapter->pdev->dev, + "MBX command 0x%x failed with err:0x%x for VF %d\n", + opcode, mbx_err_code, adapter->ahw->pci_func); + } + +err_out: + qlcnic_sriov_cleanup_transaction(trans); + return rsp; +} + +int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) +{ + struct qlcnic_cmd_args cmd; + struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; + int ret; + + if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) + return -ENOMEM; + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, + "Failed bc channel %s %d\n", cmd_op ?
"term" : "init", + ret); + goto out; + } + + cmd_op = (cmd.rsp.arg[0] & 0xff); + if (cmd.rsp.arg[0] >> 25 == 2) + return 2; + if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) + set_bit(QLC_BC_VF_STATE, &vf->state); + else + clear_bit(QLC_BC_VF_STATE, &vf->state); + +out: + qlcnic_free_mbx_args(&cmd); + return ret; +} + +void qlcnic_vf_add_mc_list(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_mac_list_s *cur; + struct list_head *head, tmp_list; + + INIT_LIST_HEAD(&tmp_list); + head = &adapter->vf_mc_list; + netif_addr_lock_bh(netdev); + + while (!list_empty(head)) { + cur = list_entry(head->next, struct qlcnic_mac_list_s, list); + list_move(&cur->list, &tmp_list); + } + + netif_addr_unlock_bh(netdev); + + while (!list_empty(&tmp_list)) { + cur = list_entry((&tmp_list)->next, + struct qlcnic_mac_list_s, list); + qlcnic_nic_add_mac(adapter, cur->mac_addr); + list_del(&cur->list); + kfree(cur); + } +} + +void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) +{ + struct list_head *head = &bc->async_list; + struct qlcnic_async_work_list *entry; + + while (!list_empty(head)) { + entry = list_entry(head->next, struct qlcnic_async_work_list, + list); + cancel_work_sync(&entry->work); + list_del(&entry->list); + kfree(entry); + } +} + +static void qlcnic_sriov_vf_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + __qlcnic_set_multi(netdev); +} + +static void qlcnic_sriov_handle_async_multi(struct work_struct *work) +{ + struct qlcnic_async_work_list *entry; + struct net_device *netdev; + + entry = container_of(work, struct qlcnic_async_work_list, work); + netdev = (struct net_device *)entry->ptr; + + qlcnic_sriov_vf_set_multi(netdev); + return; +} + +static struct qlcnic_async_work_list * +qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) +{ + struct list_head *node; + struct qlcnic_async_work_list *entry = NULL; + u8 empty = 0; + + list_for_each(node, &bc->async_list) { + entry = list_entry(node, struct qlcnic_async_work_list, list); + if (!work_pending(&entry->work)) { + empty = 1; + break; + } + } + + if (!empty) { + entry = kzalloc(sizeof(struct qlcnic_async_work_list), + GFP_ATOMIC); + if (entry == NULL) + return NULL; + list_add_tail(&entry->list, &bc->async_list); + } + + return entry; +} + +static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc, + work_func_t func, void *data) +{ + struct qlcnic_async_work_list *entry = NULL; + + entry = qlcnic_sriov_get_free_node_async_work(bc); + if (!entry) + return; + + entry->ptr = data; + INIT_WORK(&entry->work, func); + queue_work(bc->bc_async_wq, &entry->work); +} + +void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev) +{ + + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; + + qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi, + netdev); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c new file mode 100644 index 000000000000..d6ac7dcef1e4 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -0,0 +1,1176 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic_sriov.h" +#include "qlcnic.h" +#include <linux/types.h> + +#define QLCNIC_SRIOV_VF_MAX_MAC 1 + +static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); + +struct qlcnic_sriov_cmd_handler { + int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); +}; + +struct qlcnic_sriov_fw_cmd_handler { + u32 cmd; + int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); +}; + +static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, + u16 vport_id) +{ + struct qlcnic_cmd_args cmd; + int err; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO)) + return -ENOMEM; + + cmd.req.arg[1] = (vport_id << 16) | 0x1; + cmd.req.arg[2] = npar_info->bit_offsets; + cmd.req.arg[2] |= npar_info->min_tx_bw << 16; + cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16); + cmd.req.arg[4] = npar_info->max_tx_mac_filters; + cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16; + cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters | + (npar_info->max_rx_ip_addr << 16); + cmd.req.arg[6] = npar_info->max_rx_lro_flow | + (npar_info->max_rx_status_rings << 16); + cmd.req.arg[7] = npar_info->max_rx_buf_rings | + (npar_info->max_rx_ques << 16); + cmd.req.arg[8] = npar_info->max_tx_vlan_keys; + cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16; + cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to set vport info, err=%d\n", err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, + struct qlcnic_info *info, u16 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_resources *res = &sriov->ff_max; + int ret = -EIO, vpid; + u32 temp, num_vf_macs, num_vfs, max; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); + if (vpid < 0) + return -EINVAL; + + num_vfs = sriov->num_vfs; + max = num_vfs + 1; + info->bit_offsets = 0xffff; + info->min_tx_bw = 0; + info->max_tx_bw = MAX_BW; + info->max_tx_ques = res->num_tx_queues / max; + info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; + num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; + + if (adapter->ahw->pci_func == func) { + temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs); + info->max_rx_ucast_mac_filters = temp; + temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs); + info->max_tx_mac_filters = temp; + } else { + info->max_rx_ucast_mac_filters = num_vf_macs; + info->max_tx_mac_filters = num_vf_macs; + } + + info->max_rx_ip_addr = res->num_destip / max; + info->max_rx_status_rings = res->num_rx_status_rings / max; + info->max_rx_buf_rings = res->num_rx_buf_rings / max; + info->max_rx_ques = res->num_rx_queues / max; + info->max_rx_lro_flow = res->num_lro_flows_supported / max; + info->max_tx_vlan_keys = res->num_txvlan_keys; + info->max_local_ipv6_addrs = res->max_local_ipv6_addrs; + info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs; + + ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid); + if (ret) + return ret; + + return 0; +} + +static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter, + struct qlcnic_info *info) +{ + struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max; + + ff_max->num_tx_mac_filters = info->max_tx_mac_filters; + ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters; + ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters; + 
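
qlcnic_sriov_pf_cal_res_limit() above carves the firmware maxima up evenly across the PF and its VFs (max = num_vfs + 1), except for MAC filters, where each VF is pinned to QLCNIC_SRIOV_VF_MAX_MAC entries and the PF keeps whatever is left of the pool. A worked sketch of that split; the numbers are illustrative, not taken from real hardware:

#include <stdio.h>

int main(void)
{
	unsigned int num_vfs = 2;
	unsigned int max = num_vfs + 1;		/* PF + VFs share the pool */
	unsigned int ff_rx_queues = 12;		/* pretend firmware maximum */
	unsigned int ff_tx_mac_filters = 64;	/* pretend firmware maximum */
	unsigned int vf_macs = 1;		/* QLCNIC_SRIOV_VF_MAX_MAC */

	/* pooled resources: equal share per function */
	printf("rx queues per function: %u\n", ff_rx_queues / max);	/* 4 */

	/* MAC filters: one per VF, remainder to the PF */
	printf("PF tx mac filters: %u\n",
	       ff_tx_mac_filters - num_vfs * vf_macs);			/* 62 */
	printf("VF tx mac filters: %u\n", vf_macs);			/* 1 */
	return 0;
}

This is also why qlcnic_sriov_pf_set_ff_max_res() snapshots the PF's maxima first: the per-function limits are always derived from that saved pool, not from whatever the PF currently holds.
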
ff_max->num_txvlan_keys = info->max_tx_vlan_keys; + ff_max->num_rx_queues = info->max_rx_ques; + ff_max->num_tx_queues = info->max_tx_ques; + ff_max->num_lro_flows_supported = info->max_rx_lro_flow; + ff_max->num_destip = info->max_rx_ip_addr; + ff_max->num_rx_buf_rings = info->max_rx_buf_rings; + ff_max->num_rx_status_rings = info->max_rx_status_rings; + ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs; + ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs; +} + +static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info) +{ + int err; + struct qlcnic_cmd_args cmd; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO)) + return -ENOMEM; + + cmd.req.arg[1] = 0x2; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to get PF info, err=%d\n", err); + goto out; + } + + npar_info->total_pf = cmd.rsp.arg[2] & 0xff; + npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff; + npar_info->max_vports = MSW(cmd.rsp.arg[2]); + npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]); + npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]); + npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]); + npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]); + npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]); + npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]); + npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]); + npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]); + npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]); + npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]); + npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]); + npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]); + + dev_info(&adapter->pdev->dev, + "\n\ttotal_pf: %d,\n" + "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n" + "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" + "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" + "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" + "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" + "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n", + npar_info->total_pf, npar_info->total_rss_engines, + npar_info->max_vports, npar_info->max_tx_ques, + npar_info->max_tx_mac_filters, + npar_info->max_rx_mcast_mac_filters, + npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, + npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, + npar_info->max_rx_buf_rings, npar_info->max_rx_ques, + npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, + npar_info->max_remote_ipv6_addrs); + +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter, + u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vport *vp; + int index; + + if (adapter->ahw->pci_func == func) { + sriov->vp_handle = 0; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index < 0) + return; + vp = sriov->vf_info[index].vp; + vp->handle = 0; + } +} + +static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter, + u16 vport_handle, u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vport *vp; + int index; + + if (adapter->ahw->pci_func == func) { + sriov->vp_handle = vport_handle; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index < 0) + return; + vp = sriov->vf_info[index].vp; + vp->handle = vport_handle; + } +} + +static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter 
*adapter, + u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + int index; + + if (adapter->ahw->pci_func == func) { + return sriov->vp_handle; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index >= 0) { + vf_info = &sriov->vf_info[index]; + return vf_info->vp->handle; + } + } + + return -EINVAL; +} + +static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter, + u8 flag, u16 func) +{ + struct qlcnic_cmd_args cmd; + int ret; + int vpid; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT)) + return -ENOMEM; + + if (flag) { + cmd.req.arg[3] = func << 8; + } else { + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); + if (vpid < 0) { + ret = -EINVAL; + goto out; + } + cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1; + } + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, + "Failed %s vport, err %d for func 0x%x\n", + (flag ? "enable" : "disable"), ret, func); + goto out; + } + + if (flag) { + vpid = cmd.rsp.arg[2] & 0xffff; + qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func); + } else { + qlcnic_sriov_pf_reset_vport_handle(adapter, func); + } + +out: + qlcnic_free_mbx_args(&cmd); + return ret; +} + +static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter, + u8 func, u8 enable) +{ + struct qlcnic_cmd_args cmd; + int err = -EIO; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH)) + return -ENOMEM; + + cmd.req.arg[0] |= (3 << 29); + cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1; + if (enable) + cmd.req.arg[1] |= BIT_0; + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to configure sriov eswitch, err=%d\n", err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); + return err; +} + +void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) +{ + u8 func = adapter->ahw->pci_func; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + qlcnic_sriov_cfg_bc_intr(adapter, 0); + qlcnic_sriov_pf_config_vport(adapter, 0, func); + qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); + __qlcnic_sriov_cleanup(adapter); + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); +} + +void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) +{ + if (!qlcnic_sriov_pf_check(adapter)) + return; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + pci_disable_sriov(adapter->pdev); + netdev_info(adapter->netdev, + "SR-IOV is disabled successfully on port %d\n", + adapter->portnum); +} + +static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_sriov_pf_disable(adapter); + + qlcnic_sriov_pf_cleanup(adapter); + + /* After disabling SR-IOV, re-initialize the driver in default mode + * and configure the opmode based on the op_mode of the function. + */ + if (qlcnic_83xx_configure_opmode(adapter)) + return -EIO; + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + + return 0; +} + +static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_info nic_info, pf_info, vp_info; + int err; + u8 func = ahw->pci_func; + + if (!qlcnic_sriov_enable_check(adapter)) + return 0; + + err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1); + if (err) + goto clear_sriov_enable; + + err = qlcnic_sriov_pf_config_vport(adapter, 1, func); + if (err) + goto
disable_eswitch; + + err = qlcnic_sriov_get_pf_info(adapter, &pf_info); + if (err) + goto delete_vport; + + qlcnic_sriov_pf_set_ff_max_res(adapter, &pf_info); + + err = qlcnic_get_nic_info(adapter, &nic_info, func); + if (err) + goto delete_vport; + + err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func); + if (err) + goto delete_vport; + + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + goto delete_vport; + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_mtu = nic_info.max_mtu; + ahw->capabilities = nic_info.capabilities; + ahw->nic_mode = QLC_83XX_SRIOV_MODE; + return err; + +delete_vport: + qlcnic_sriov_pf_config_vport(adapter, 0, func); + +disable_eswitch: + qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); + +clear_sriov_enable: + __qlcnic_sriov_cleanup(adapter); + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + return err; +} + +static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) +{ + int err; + + if (!qlcnic_sriov_enable_check(adapter)) + return 0; + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) + qlcnic_sriov_pf_cleanup(adapter); + + return err; +} + +static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, + int num_vfs) +{ + int err = 0; + + set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; + + if (qlcnic_sriov_init(adapter, num_vfs)) { + clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + return -EIO; + } + + if (qlcnic_sriov_pf_init(adapter)) + return -EIO; + + err = qlcnic_sriov_pf_enable(adapter, num_vfs); + return err; +} + +static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + netdev_err(netdev, + "SR-IOV cannot be enabled, when legacy interrupts are enabled\n"); + return -EIO; + } + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + err = __qlcnic_pci_sriov_enable(adapter, num_vfs); + if (err) { + netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", + adapter->portnum); + + if (qlcnic_83xx_configure_opmode(adapter)) + goto error; + } else { + netdev_info(adapter->netdev, + "SR-IOV is enabled successfully on port %d\n", + adapter->portnum); + } + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + +error: + return err; +} + +int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(dev); + int err; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + if (num_vfs == 0) + err = qlcnic_pci_sriov_disable(adapter); + else + err = qlcnic_pci_sriov_enable(adapter, num_vfs); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter, + u16 func) +{ + struct qlcnic_info defvp_info; + int err; + + err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func); + if (err) + return -EIO; + + return 0; +} + +static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + u16 func = vf->pci_func; + + cmd->rsp.arg[0] = trans->req_hdr->cmd_op; + cmd->rsp.arg[0] |= (1 << 16); + + if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { + err = 
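
The PF init path above unwinds in reverse order through a ladder of goto labels (delete_vport, disable_eswitch, clear_sriov_enable), one label per completed step. A minimal self-contained example of the same idiom, with setup_a/setup_b standing in for the driver's eswitch and vport steps:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return -1; }	/* pretend this step fails */
static void undo_a(void) { puts("undo a"); }

/* Tear down only what was already set up, in reverse order. */
static int init(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;
	err = setup_b();
	if (err)
		goto undo;
	return 0;

undo:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}
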
qlcnic_sriov_pf_config_vport(adapter, 1, func); + if (!err) { + err = qlcnic_sriov_set_vf_vport_info(adapter, func); + if (err) + qlcnic_sriov_pf_config_vport(adapter, 0, func); + } + } else { + err = qlcnic_sriov_pf_config_vport(adapter, 0, func); + } + + if (err) + goto err_out; + + cmd->rsp.arg[0] |= (1 << 25); + + if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) + set_bit(QLC_BC_VF_STATE, &vf->state); + else + clear_bit(QLC_BC_VF_STATE, &vf->state); + + return err; + +err_out: + cmd->rsp.arg[0] |= (2 << 25); + return err; +} + +static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter, + struct qlcnic_vport *vp, + u16 func, __le16 vlan, u8 op) +{ + struct qlcnic_cmd_args cmd; + struct qlcnic_macvlan_mbx mv; + u8 *addr; + int err; + u32 *buf; + int vpid; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN)) + return -ENOMEM; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); + if (vpid < 0) { + err = -EINVAL; + goto out; + } + + if (vlan) + op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? + QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); + + cmd.req.arg[1] = op | (1 << 8) | (3 << 6); + cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; + + addr = vp->mac; + mv.vlan = le16_to_cpu(vlan); + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; + buf = &cmd.req.arg[2]; + memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err) + dev_err(&adapter->pdev->dev, + "MAC-VLAN %s to CAM failed, err=%d.\n", + ((op == 1) ? "add " : "delete "), err); + +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = tran->vf; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_rcv_mbx_out *mbx_out; + int err; + + err = qlcnic_sriov_validate_create_rx_ctx(cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[6] = vf->vp->handle; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) { + mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1]; + vf->rx_ctx_id = mbx_out->ctx_id; + qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func, + 0, QLCNIC_MAC_ADD); + } else { + vf->rx_ctx_id = 0; + } + + return err; +} + +static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + u8 type, *mac; + + type = cmd->req.arg[1]; + switch (type) { + case QLCNIC_SET_STATION_MAC: + case QLCNIC_SET_FAC_DEF_MAC: + cmd->rsp.arg[0] = (2 << 25); + break; + case QLCNIC_GET_CURRENT_MAC: + cmd->rsp.arg[0] = (1 << 25); + mac = vf->vp->mac; + cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00); + cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) | + ((mac[3]) << 16 & 0xff0000) | + ((mac[2]) << 24 & 0xff000000); + } + + return 0; +} + +static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + struct 
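
The channel-config handler reports back to the VF through the high bits of rsp.arg[0]: the command op stays in the low bits and a status code lands at bit 25 (1 on success, 2 on failure, 6 for validation errors elsewhere in this file). A small sketch of that packing; the 7-bit field width is an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

#define QLC_STATUS_SHIFT 25

static uint32_t set_status(uint32_t arg0, uint32_t code)
{
	return arg0 | (code << QLC_STATUS_SHIFT);
}

static uint32_t get_status(uint32_t arg0)
{
	return (arg0 >> QLC_STATUS_SHIFT) & 0x7f;
}

int main(void)
{
	uint32_t arg0 = 0x20;		/* pretend command op */

	arg0 = set_status(arg0, 1);	/* success */
	printf("status=%u raw=%#x\n", (unsigned)get_status(arg0),
	       (unsigned)arg0);
	return 0;
}
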
qlcnic_tx_mbx_out *mbx_out; + int err; + + err = qlcnic_sriov_validate_create_tx_ctx(cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[5] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) { + mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2]; + vf->tx_ctx_id = mbx_out->ctx_id; + } else { + vf->tx_ctx_id = 0; + } + + return err; +} + +static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func, + 0, QLCNIC_MAC_DEL); + cmd->req.arg[1] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) + vf->rx_ctx_id = 0; + + return err; +} + +static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[1] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) + vf->tx_ctx_id = 0; + + return err; +} + +static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_lro(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err = -EIO; + u8 op; + + op = cmd->req.arg[1] & 0xff; + + cmd->req.arg[1] |= vf->vp->handle << 16; + cmd->req.arg[1] |= BIT_31; + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func) + return -EINVAL; + + if (!(cmd->req.arg[1] & BIT_16)) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xff) != 0x1) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + 
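
A pattern every per-command handler in this file repeats: validate that the VF actually owns the resource named in the mailbox arguments before forwarding the request to firmware, and stamp 0x6 << 25 into the response if it does not. A reduced model of the ownership check:

#include <stdio.h>
#include <stdint.h>

struct vf_state { uint16_t rx_ctx_id; };	/* ctx id the PF handed out */

/* The context id embedded in the request must match PF-side state. */
static int validate_del_rx_ctx(const struct vf_state *vf, uint32_t arg1)
{
	if ((arg1 & 0xffff) != vf->rx_ctx_id)
		return -1;	/* VF named a context it does not own */
	return 0;
}

int main(void)
{
	struct vf_state vf = { .rx_ctx_id = 7 };

	printf("own ctx:     %d\n", validate_del_rx_ctx(&vf, 7));
	printf("foreign ctx: %d\n", validate_del_rx_ctx(&vf, 9));
	return 0;
}
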
return err; +} + +static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] != vf->rx_ctx_id) + return -EINVAL; + + if (cmd->req.arg[2] > adapter->ahw->max_mtu) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_mtu(adapter, vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + return err; +} + +static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] & BIT_31) { + if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func) + return -EINVAL; + } else { + cmd->req.arg[1] |= vf->vp->handle << 16; + } + + return 0; +} + +static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_get_nic_info(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_rss(vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + return err; +} + +static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + u16 ctx_id, pkts, time; + + ctx_id = cmd->req.arg[1] >> 16; + pkts = cmd->req.arg[2] & 0xffff; + time = cmd->req.arg[2] >> 16; + + if (ctx_id != vf->rx_ctx_id) + return -EINVAL; + if (pkts > coal->rx_packets) + return -EINVAL; + if (time < coal->rx_time_us) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = tran->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_macvlan_mbx *macvlan; + + if (!(cmd->req.arg[1] & BIT_8)) + return -EINVAL; + + cmd->req.arg[1] |= (vf->vp->handle << 16); + cmd->req.arg[1] |= BIT_31; + + macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2]; + if (!(macvlan->mac_addr0 & BIT_0)) { + dev_err(&adapter->pdev->dev, + "MAC address change is not allowed from VF %d", + vf->pci_func); + return -EINVAL; + } + + return 0; +} + +static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int 
err; + + err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) + return -EINVAL; + + if (!(cmd->req.arg[1] & BIT_8)) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_linkevent(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + cmd->req.arg[1] |= vf->vp->handle << 16; + cmd->req.arg[1] |= BIT_31; + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static const int qlcnic_pf_passthru_supp_cmds[] = { + QLCNIC_CMD_GET_STATISTICS, + QLCNIC_CMD_GET_PORT_CONFIG, + QLCNIC_CMD_GET_LINK_STATUS, +}; + +static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { + [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd}, + [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd}, +}; + +static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = { + {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd}, + {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd}, + {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd}, + {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd}, + {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd}, + {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd}, + {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd}, + {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd}, + {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd}, + {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd}, + {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd}, + {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd}, + {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd}, + {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd}, + {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd}, +}; + +void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + u8 size, cmd_op; + + cmd_op = trans->req_hdr->cmd_op; + + if (trans->req_hdr->op_type == QLC_BC_CMD) { + size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr); + if (cmd_op < size) { + qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd); + return; + } + } else { + int i; + size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr); + for (i = 0; i < size; i++) { + if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) { + qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd); + return; + } + } + + size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds); + for (i = 0; i < size; i++) { + if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) { + qlcnic_issue_cmd(adapter, cmd); + return; + } + } + } + + cmd->rsp.arg[0] |= (0x9 << 25); +} + +void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + 
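
qlcnic_sriov_pf_process_bc_cmd above dispatches three ways: basic-channel ops index straight into qlcnic_pf_bc_cmd_hdlr, firmware ops are matched by a linear scan of qlcnic_pf_fw_cmd_hdlr, and a short whitelist of ops is passed through to the adapter untouched. A compact model of the two table styles; the ops and handlers here are invented:

#include <stdio.h>

typedef int (*handler_t)(int arg);

static int h_init(int a) { printf("init %d\n", a); return 0; }
static int h_term(int a) { printf("term %d\n", a); return 0; }

static handler_t bc_tbl[] = { h_init, h_term };	/* indexed by op */

struct fw_ent { int op; handler_t fn; };
static struct fw_ent fw_tbl[] = { { 0x22, h_init }, { 0x23, h_term } };

static int dispatch_fw(int op, int arg)
{
	size_t i;

	for (i = 0; i < sizeof(fw_tbl) / sizeof(fw_tbl[0]); i++)
		if (fw_tbl[i].op == op)
			return fw_tbl[i].fn(arg);
	return -1;		/* unsupported op: reject, as the PF does */
}

int main(void)
{
	bc_tbl[0](1);		/* direct index; caller bounds-checks op */
	dispatch_fw(0x23, 2);	/* linear search */
	return 0;
}
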
*int_id |= vpid; +} + +void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + int vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} + +void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} + +void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..c77675da671f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -21,8 +21,6 @@ #include <linux/aer.h> #include <linux/log2.h> -#include <linux/sysfs.h> - #define QLC_STATUS_UNSUPPORTED_CMD -2 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) @@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, return size; } +static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + unsigned char *p_read_buf; + int ret, count; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!size) + return QL_STATUS_INVALID_PARAM; + if (!buf) + return QL_STATUS_INVALID_PARAM; + + count = size / sizeof(u32); + + if (size % sizeof(u32)) + count++; + + p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_read_buf) + return -ENOMEM; + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_read_buf); + return -EIO; + } + + ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf, + count); + + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + kfree(p_read_buf); + return ret; + } + + qlcnic_83xx_unlock_flash(adapter); + memcpy(buf, p_read_buf, size); + kfree(p_read_buf); + + return size; +} + +static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, + size_t size) +{ + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + memcpy(p_cache, buf, size); + p_src = p_cache; + count = size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for 
(i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) { + ret = qlcnic_83xx_flash_bulk_write(adapter, offset, + (u32 *)p_src, + QLC_83XX_FLASH_WRITE_MAX); + + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, size_t size) +{ + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + memcpy(p_cache, buf, size); + p_src = p_cache; + count = size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for (i = 0; i < count; i++) { + ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src); + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32); + offset = offset + sizeof(u32); + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + int ret; + static int flash_mode; + unsigned long data; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!buf) + return QL_STATUS_INVALID_PARAM; + + ret = kstrtoul(buf, 16, &data); + + switch (data) { + case QLC_83XX_FLASH_SECTOR_ERASE_CMD: + flash_mode = QLC_83XX_ERASE_MODE; + ret = qlcnic_83xx_erase_flash_sector(adapter, offset); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, __LINE__); + return -EIO; + } + break; + + case QLC_83XX_FLASH_BULK_WRITE_CMD: + flash_mode = QLC_83XX_BULK_WRITE_MODE; + break; + + case QLC_83XX_FLASH_WRITE_CMD: + flash_mode = QLC_83XX_WRITE_MODE; + break; + default: + if (flash_mode == QLC_83XX_BULK_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", + __func__, __LINE__); + return -EIO; + } + } + + if (flash_mode == QLC_83XX_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, + __LINE__); + return -EIO; 
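
The bulk write loop above steps through the buffer QLC_83XX_FLASH_WRITE_MAX words at a time, advancing the source pointer and the flash offset by the same byte count; because it iterates count / QLC_83XX_FLASH_WRITE_MAX times, a trailing partial chunk is not written by the bulk path. A standalone sketch of the chunking arithmetic, with CHUNK_WORDS and the fake device as stand-ins:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_WORDS 64	/* stands in for QLC_83XX_FLASH_WRITE_MAX */

static int flash_write_chunk(uint32_t off, const uint32_t *src, int words)
{
	printf("write %d words at %#x (first=%#x)\n",
	       words, (unsigned)off, (unsigned)src[0]);
	return 0;
}

static int bulk_write(uint32_t off, const uint32_t *src, int count)
{
	int i;

	for (i = 0; i < count / CHUNK_WORDS; i++) {
		if (flash_write_chunk(off, src, CHUNK_WORDS))
			return -5;	/* -EIO stand-in */
		src += CHUNK_WORDS;
		off += CHUNK_WORDS * sizeof(uint32_t);
	}
	return 0;
}

int main(void)
{
	uint32_t buf[130] = { 0xdeadbeef };

	/* 130 words: two full chunks written, last 2 words skipped */
	return bulk_write(0x10000, buf, 130);
}
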
+ } + } + } + + return size; +} + static struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_bridged_mode, @@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = { .write = qlcnic_sysfs_write_pm_config, }; +static struct bin_attribute bin_attr_flash = { + .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_83xx_sysfs_flash_read_handler, + .write = qlcnic_83xx_sysfs_flash_write_handler, +}; + void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) { struct device *dev = &adapter->pdev->dev; @@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter) void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; + qlcnic_create_diag_entries(adapter); + + if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash)) + dev_info(dev, "failed to create flash sysfs entry\n"); } void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; + qlcnic_remove_diag_entries(adapter); + sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash); } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..1dd778a6f01e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); if (sbq_desc->p.skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "Couldn't get an skb.\n"); rx_ring->sbq_clean_idx = clean_idx; return; } @@ -1519,8 +1517,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, skb = netdev_alloc_skb(ndev, length); if (!skb) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get an skb, need to unwind!.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; @@ -1605,8 +1601,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, /* Allocate new_skb and copy */ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); if (new_skb == NULL) { - netif_err(qdev, probe, qdev->ndev, - "No skb available, drop the packet.\n"); rx_ring->rx_dropped++; return; } diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 5b4103db70f5..e9dc84943cfc 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) break; } + if (limit < 0) + return -ETIMEDOUT; + return ioread16(ioaddr + MMRD); } /* Write a word data from PHY Chip */ -static void r6040_phy_write(void __iomem *ioaddr, +static int r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) { int limit = MAC_DEF_TIMEOUT; @@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr, if (!(cmd & MDIO_WRITE)) break; } + + return (limit < 0) ? 
-ETIMEDOUT : 0; } static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) @@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, struct r6040_private *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - r6040_phy_write(ioaddr, phy_addr, reg, value); - - return 0; + return r6040_phy_write(ioaddr, phy_addr, reg, value); } static int r6040_mdiobus_reset(struct mii_bus *bus) @@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev) do { skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); if (!skb) { - netdev_err(dev, "failed to alloc skb for rx\n"); rc = -ENOMEM; goto err_exit; } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 1276ac71353a..3ccedeb8aba0 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2041,8 +2041,6 @@ keep_pkt: netif_receive_skb (skb); } else { - if (net_ratelimit()) - netdev_warn(dev, "Memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; } received++; diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 9f2d416de750..d77d60ea8202 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev) skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { - printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", - dev->name); dev->stats.rx_dropped++; goto done; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6ed333fe5c04..a7499cbf4503 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2,7 +2,8 @@ * SuperH Ethernet device driver * * Copyright (C) 2006-2012 Nobuhiro Iwamatsu - * Copyright (C) 2008-2012 Renesas Solutions Corp. + * Copyright (C) 2008-2013 Renesas Solutions Corp. + * Copyright (C) 2013 Cogent Embedded, Inc. 
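
The r6040 change above turns a silent MDIO busy-wait into one that reports -ETIMEDOUT, which r6040_mdiobus_write can now propagate to the MII layer. The shape of that poll loop, modelled in plain C with a fake status bit:

#include <stdio.h>

#define MAC_DEF_TIMEOUT 2048
#define ETIMEDOUT_ERR   (-110)

static int hw_busy_polls_left = 3;	/* fake device: busy for 3 polls */

static int mdio_op_done(void)
{
	return --hw_busy_polls_left < 0;
}

/* Count the limit down; if the status bit never cleared, say so
 * instead of returning void and losing the failure. */
static int phy_wait(void)
{
	int limit = MAC_DEF_TIMEOUT;

	while (limit--) {
		if (mdio_op_done())
			break;
	}
	return (limit < 0) ? ETIMEDOUT_ERR : 0;
}

int main(void)
{
	printf("phy_wait() = %d\n", phy_wait());
	return 0;
}
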
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -49,6 +50,269 @@ NETIF_MSG_RX_ERR| \ NETIF_MSG_TX_ERR) +static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [PSR] = 0x0528, + [PIPR] = 0x052c, + [RFLR] = 0x0508, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [GECMR] = 0x05b0, + [BCULR] = 0x05b4, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [TROCR] = 0x0700, + [CDCR] = 0x0708, + [LCCR] = 0x0710, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [CERCR] = 0x0768, + [CEECR] = 0x0770, + [MAFCR] = 0x0778, + [RMII_MII] = 0x0790, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAG0] = 0x0040, + [TSU_QTAG1] = 0x0044, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_VTAG0] = 0x0058, + [TSU_VTAG1] = 0x005c, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + [TSU_ADRL0] = 0x0104, + [TSU_ADRH31] = 0x01f8, + [TSU_ADRL31] = 0x01fc, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, +}; + +static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0300, + [RFLR] = 0x0308, + [ECSR] = 0x0310, + [ECSIPR] = 0x0318, + [PIR] = 0x0320, + [PSR] = 0x0328, + [RDMLR] = 0x0340, + [IPGR] = 0x0350, + [APR] = 0x0354, + [MPR] = 0x0358, + [RFCF] = 0x0360, + [TPAUSER] = 0x0364, + [TPAUSECR] = 0x0368, + [MAHR] = 0x03c0, + [MALR] = 0x03c8, + [TROCR] = 0x03d0, + [CDCR] = 0x03d4, + [LCCR] = 0x03d8, + [CNDCR] = 0x03dc, + [CEFCR] = 0x03e4, + [FRECR] = 0x03e8, + [TSFRCR] = 0x03ec, + [TLFRCR] = 0x03f0, + [RFCR] = 0x03f4, + [MAFCR] = 0x03f8, + + [EDMR] = 0x0200, + [EDTRR] = 0x0208, + [EDRRR] = 0x0210, + [TDLAR] = 0x0218, + [RDLAR] = 0x0220, + [EESR] = 0x0228, + [EESIPR] = 0x0230, + [TRSCER] = 0x0238, + [RMFCR] = 0x0240, + [TFTR] = 0x0248, + [FDR] = 0x0250, + [RMCR] = 0x0258, + [TFUCR] = 0x0264, + [RFOCR] = 0x0268, + [FCFTR] = 0x0270, + [TRIMD] = 0x027c, +}; + +static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0100, + [RFLR] = 0x0108, + [ECSR] = 0x0110, + [ECSIPR] = 0x0118, + [PIR] = 0x0120, + [PSR] = 0x0128, + [RDMLR] = 0x0140, + [IPGR] = 0x0150, + [APR] = 0x0154, + [MPR] = 0x0158, + [TPAUSER] = 0x0164, + [RFCF] = 0x0160, + [TPAUSECR] = 0x0168, + [BCFRR] = 0x016c, + [MAHR] = 0x01c0, + [MALR] = 0x01c8, + [TROCR] = 0x01d0, + 
[CDCR] = 0x01d4, + [LCCR] = 0x01d8, + [CNDCR] = 0x01dc, + [CEFCR] = 0x01e4, + [FRECR] = 0x01e8, + [TSFRCR] = 0x01ec, + [TLFRCR] = 0x01f0, + [RFCR] = 0x01f4, + [MAFCR] = 0x01f8, + [RTRATE] = 0x01fc, + + [EDMR] = 0x0000, + [EDTRR] = 0x0008, + [EDRRR] = 0x0010, + [TDLAR] = 0x0018, + [RDLAR] = 0x0020, + [EESR] = 0x0028, + [EESIPR] = 0x0030, + [TRSCER] = 0x0038, + [RMFCR] = 0x0040, + [TFTR] = 0x0048, + [FDR] = 0x0050, + [RMCR] = 0x0058, + [TFUCR] = 0x0064, + [RFOCR] = 0x0068, + [FCFTR] = 0x0070, + [RPADIR] = 0x0078, + [TRIMD] = 0x007c, + [RBWAR] = 0x00c8, + [RDFAR] = 0x00cc, + [TBRAR] = 0x00d4, + [TDFAR] = 0x00d8, +}; + +static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0160, + [ECSR] = 0x0164, + [ECSIPR] = 0x0168, + [PIR] = 0x016c, + [MAHR] = 0x0170, + [MALR] = 0x0174, + [RFLR] = 0x0178, + [PSR] = 0x017c, + [TROCR] = 0x0180, + [CDCR] = 0x0184, + [LCCR] = 0x0188, + [CNDCR] = 0x018c, + [CEFCR] = 0x0194, + [FRECR] = 0x0198, + [TSFRCR] = 0x019c, + [TLFRCR] = 0x01a0, + [RFCR] = 0x01a4, + [MAFCR] = 0x01a8, + [IPGR] = 0x01b4, + [APR] = 0x01b8, + [MPR] = 0x01bc, + [TPAUSER] = 0x01c4, + [BCFR] = 0x01cc, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, + + [TSU_ADRH0] = 0x0100, + [TSU_ADRL0] = 0x0104, + [TSU_ADRL31] = 0x01fc, +}; + #if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_ARCH_R8A7740) @@ -78,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) +#if defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -93,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - unsigned int bits = ECMR_RTM; -#if defined(CONFIG_ARCH_R8A7779) - bits |= ECMR_ELB; -#endif + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); + break; + default: + break; + } +} + +/* R8A7779 */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | + EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + + 
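
These per-SoC tables map one shared register enum to family-specific MMIO offsets, so a single read/write helper can serve the gigabit, R-Car, SH4 and SH3/SH2 parts. A cut-down model with two invented register maps and a fake register window:

#include <stdio.h>
#include <stdint.h>

enum { REG_ECMR, REG_MAHR, REG_MAX };

static const uint16_t offs_giga[REG_MAX] = {
	[REG_ECMR] = 0x0500,
	[REG_MAHR] = 0x05c0,
};
static const uint16_t offs_fast[REG_MAX] = {
	[REG_ECMR] = 0x0100,
	[REG_MAHR] = 0x01c0,
};

static uint32_t mmio[0x1000];	/* fake MMIO window, word-addressed */

/* One code path; the SoC only changes which table is passed in. */
static void eth_write(const uint16_t *offs, int reg, uint32_t val)
{
	mmio[offs[reg] / 4] = val;
}

int main(void)
{
	eth_write(offs_giga, REG_ECMR, 1);
	eth_write(offs_fast, REG_ECMR, 2);
	printf("%u %u\n", (unsigned)mmio[0x0500 / 4],
	       (unsigned)mmio[0x0100 / 4]);
	return 0;
}
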
.apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; +#elif defined(CONFIG_CPU_SUBTYPE_SH7724) +#define SH_ETH_RESET_DEFAULT 1 +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); break; default: break; @@ -592,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev) cnt--; } if (cnt < 0) { - printk(KERN_ERR "Device reset fail\n"); + pr_err("Device reset fail\n"); ret = -ETIMEDOUT; } return ret; @@ -908,11 +1214,8 @@ static int sh_eth_ring_init(struct net_device *ndev) /* Allocate all Rx descriptors. */ rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, - GFP_KERNEL); - + GFP_KERNEL); if (!mdp->rx_ring) { - dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", - rx_ringsize); ret = -ENOMEM; goto desc_ring_free; } @@ -922,10 +1225,8 @@ static int sh_eth_ring_init(struct net_device *ndev) /* Allocate all Tx descriptors. */ tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, - GFP_KERNEL); + GFP_KERNEL); if (!mdp->tx_ring) { - dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", - tx_ringsize); ret = -ENOMEM; goto desc_ring_free; } @@ -2228,7 +2529,6 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) /* MDIO bus release function */ static int sh_mdio_release(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); struct mii_bus *bus = dev_get_drvdata(&ndev->dev); /* unregister mdio bus */ @@ -2237,15 +2537,9 @@ static int sh_mdio_release(struct net_device *ndev) /* remove mdio bus info from net_device */ dev_set_drvdata(&ndev->dev, NULL); - /* free interrupts memory */ - kfree(bus->irq); - /* free bitbang info */ free_mdio_bitbang(bus); - /* free bitbang memory */ - kfree(mdp->bitbang); - return 0; } @@ -2258,7 +2552,8 @@ static int sh_mdio_init(struct net_device *ndev, int id, struct sh_eth_private *mdp = netdev_priv(ndev); /* create bit control struct for PHY */ - bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info), + GFP_KERNEL); if (!bitbang) { ret = -ENOMEM; goto out; @@ -2267,18 +2562,17 @@ static int sh_mdio_init(struct net_device *ndev, int id, /* bitbang init */ bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; bitbang->set_gate = pd->set_mdio_gate; - bitbang->mdi_msk = 0x08; - bitbang->mdo_msk = 0x04; - bitbang->mmd_msk = 0x02;/* MMD */ - bitbang->mdc_msk = 0x01; + bitbang->mdi_msk = PIR_MDI; + bitbang->mdo_msk = PIR_MDO; + bitbang->mmd_msk = PIR_MMD; + bitbang->mdc_msk = PIR_MDC; bitbang->ctrl.ops = &bb_ops; /* MII controller setting */ - mdp->bitbang = bitbang; mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!mdp->mii_bus) { ret = -ENOMEM; - goto out_free_bitbang; + goto out; } /* Hook up MII support for ethtool */ @@ -2288,7 
+2582,9 @@ static int sh_mdio_init(struct net_device *ndev, int id, mdp->pdev->name, id); /* PHY IRQ */ - mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + mdp->mii_bus->irq = devm_kzalloc(&ndev->dev, + sizeof(int) * PHY_MAX_ADDR, + GFP_KERNEL); if (!mdp->mii_bus->irq) { ret = -ENOMEM; goto out_free_bus; @@ -2300,21 +2596,15 @@ static int sh_mdio_init(struct net_device *ndev, int id, /* register mdio bus */ ret = mdiobus_register(mdp->mii_bus); if (ret) - goto out_free_irq; + goto out_free_bus; dev_set_drvdata(&ndev->dev, mdp->mii_bus); return 0; -out_free_irq: - kfree(mdp->mii_bus->irq); - out_free_bus: free_mdio_bitbang(mdp->mii_bus); -out_free_bitbang: - kfree(bitbang); - out: return ret; } @@ -2327,6 +2617,9 @@ static const u16 *sh_eth_get_register_offset(int register_type) case SH_ETH_REG_GIGABIT: reg_offset = sh_eth_offset_gigabit; break; + case SH_ETH_REG_FAST_RCAR: + reg_offset = sh_eth_offset_fast_rcar; + break; case SH_ETH_REG_FAST_SH4: reg_offset = sh_eth_offset_fast_sh4; break; @@ -2334,7 +2627,7 @@ static const u16 *sh_eth_get_register_offset(int register_type) reg_offset = sh_eth_offset_fast_sh3_sh2; break; default: - printk(KERN_ERR "Unknown register type (%d)\n", register_type); + pr_err("Unknown register type (%d)\n", register_type); break; } @@ -2364,7 +2657,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; - struct sh_eth_plat_data *pd; + struct sh_eth_plat_data *pd = pdev->dev.platform_data; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2402,10 +2695,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp = netdev_priv(ndev); mdp->num_tx_ring = TX_RING_SIZE; mdp->num_rx_ring = RX_RING_SIZE; - mdp->addr = ioremap(res->start, resource_size(res)); - if (mdp->addr == NULL) { - ret = -ENOMEM; - dev_err(&pdev->dev, "ioremap failed.\n"); + mdp->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mdp->addr)) { + ret = PTR_ERR(mdp->addr); goto out_release; } @@ -2414,7 +2706,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); - pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); /* get PHY ID */ mdp->phy_id = pd->phy; mdp->phy_interface = pd->phy_interface; @@ -2452,11 +2743,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ret = -ENODEV; goto out_release; } - mdp->tsu_addr = ioremap(rtsu->start, - resource_size(rtsu)); - if (mdp->tsu_addr == NULL) { - ret = -ENOMEM; - dev_err(&pdev->dev, "TSU ioremap failed.\n"); + mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); + if (IS_ERR(mdp->tsu_addr)) { + ret = PTR_ERR(mdp->tsu_addr); goto out_release; } mdp->port = devno % 2; @@ -2497,10 +2786,6 @@ out_unregister: out_release: /* net_dev free */ - if (mdp && mdp->addr) - iounmap(mdp->addr); - if (mdp && mdp->tsu_addr) - iounmap(mdp->tsu_addr); if (ndev) free_netdev(ndev); @@ -2511,14 +2796,10 @@ out: static int sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->cd->tsu) - iounmap(mdp->tsu_addr); sh_mdio_release(ndev); unregister_netdev(ndev); pm_runtime_disable(&pdev->dev); - iounmap(mdp->addr); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 828be4515008..1ddc9f235bcb 100644 --- 
a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -156,225 +156,6 @@ enum { SH_ETH_MAX_REGISTER_OFFSET, }; -static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { - [EDSR] = 0x0000, - [EDMR] = 0x0400, - [EDTRR] = 0x0408, - [EDRRR] = 0x0410, - [EESR] = 0x0428, - [EESIPR] = 0x0430, - [TDLAR] = 0x0010, - [TDFAR] = 0x0014, - [TDFXR] = 0x0018, - [TDFFR] = 0x001c, - [RDLAR] = 0x0030, - [RDFAR] = 0x0034, - [RDFXR] = 0x0038, - [RDFFR] = 0x003c, - [TRSCER] = 0x0438, - [RMFCR] = 0x0440, - [TFTR] = 0x0448, - [FDR] = 0x0450, - [RMCR] = 0x0458, - [RPADIR] = 0x0460, - [FCFTR] = 0x0468, - [CSMR] = 0x04E4, - - [ECMR] = 0x0500, - [ECSR] = 0x0510, - [ECSIPR] = 0x0518, - [PIR] = 0x0520, - [PSR] = 0x0528, - [PIPR] = 0x052c, - [RFLR] = 0x0508, - [APR] = 0x0554, - [MPR] = 0x0558, - [PFTCR] = 0x055c, - [PFRCR] = 0x0560, - [TPAUSER] = 0x0564, - [GECMR] = 0x05b0, - [BCULR] = 0x05b4, - [MAHR] = 0x05c0, - [MALR] = 0x05c8, - [TROCR] = 0x0700, - [CDCR] = 0x0708, - [LCCR] = 0x0710, - [CEFCR] = 0x0740, - [FRECR] = 0x0748, - [TSFRCR] = 0x0750, - [TLFRCR] = 0x0758, - [RFCR] = 0x0760, - [CERCR] = 0x0768, - [CEECR] = 0x0770, - [MAFCR] = 0x0778, - [RMII_MII] = 0x0790, - - [ARSTR] = 0x0000, - [TSU_CTRST] = 0x0004, - [TSU_FWEN0] = 0x0010, - [TSU_FWEN1] = 0x0014, - [TSU_FCM] = 0x0018, - [TSU_BSYSL0] = 0x0020, - [TSU_BSYSL1] = 0x0024, - [TSU_PRISL0] = 0x0028, - [TSU_PRISL1] = 0x002c, - [TSU_FWSL0] = 0x0030, - [TSU_FWSL1] = 0x0034, - [TSU_FWSLC] = 0x0038, - [TSU_QTAG0] = 0x0040, - [TSU_QTAG1] = 0x0044, - [TSU_FWSR] = 0x0050, - [TSU_FWINMK] = 0x0054, - [TSU_ADQT0] = 0x0048, - [TSU_ADQT1] = 0x004c, - [TSU_VTAG0] = 0x0058, - [TSU_VTAG1] = 0x005c, - [TSU_ADSBSY] = 0x0060, - [TSU_TEN] = 0x0064, - [TSU_POST1] = 0x0070, - [TSU_POST2] = 0x0074, - [TSU_POST3] = 0x0078, - [TSU_POST4] = 0x007c, - [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRH31] = 0x01f8, - [TSU_ADRL31] = 0x01fc, - - [TXNLCR0] = 0x0080, - [TXALCR0] = 0x0084, - [RXNLCR0] = 0x0088, - [RXALCR0] = 0x008c, - [FWNLCR0] = 0x0090, - [FWALCR0] = 0x0094, - [TXNLCR1] = 0x00a0, - [TXALCR1] = 0x00a0, - [RXNLCR1] = 0x00a8, - [RXALCR1] = 0x00ac, - [FWNLCR1] = 0x00b0, - [FWALCR1] = 0x00b4, -}; - -static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { - [ECMR] = 0x0100, - [RFLR] = 0x0108, - [ECSR] = 0x0110, - [ECSIPR] = 0x0118, - [PIR] = 0x0120, - [PSR] = 0x0128, - [RDMLR] = 0x0140, - [IPGR] = 0x0150, - [APR] = 0x0154, - [MPR] = 0x0158, - [TPAUSER] = 0x0164, - [RFCF] = 0x0160, - [TPAUSECR] = 0x0168, - [BCFRR] = 0x016c, - [MAHR] = 0x01c0, - [MALR] = 0x01c8, - [TROCR] = 0x01d0, - [CDCR] = 0x01d4, - [LCCR] = 0x01d8, - [CNDCR] = 0x01dc, - [CEFCR] = 0x01e4, - [FRECR] = 0x01e8, - [TSFRCR] = 0x01ec, - [TLFRCR] = 0x01f0, - [RFCR] = 0x01f4, - [MAFCR] = 0x01f8, - [RTRATE] = 0x01fc, - - [EDMR] = 0x0000, - [EDTRR] = 0x0008, - [EDRRR] = 0x0010, - [TDLAR] = 0x0018, - [RDLAR] = 0x0020, - [EESR] = 0x0028, - [EESIPR] = 0x0030, - [TRSCER] = 0x0038, - [RMFCR] = 0x0040, - [TFTR] = 0x0048, - [FDR] = 0x0050, - [RMCR] = 0x0058, - [TFUCR] = 0x0064, - [RFOCR] = 0x0068, - [FCFTR] = 0x0070, - [RPADIR] = 0x0078, - [TRIMD] = 0x007c, - [RBWAR] = 0x00c8, - [RDFAR] = 0x00cc, - [TBRAR] = 0x00d4, - [TDFAR] = 0x00d8, -}; - -static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { - [ECMR] = 0x0160, - [ECSR] = 0x0164, - [ECSIPR] = 0x0168, - [PIR] = 0x016c, - [MAHR] = 0x0170, - [MALR] = 0x0174, - [RFLR] = 0x0178, - [PSR] = 0x017c, - [TROCR] = 0x0180, - [CDCR] = 0x0184, - [LCCR] = 0x0188, - [CNDCR] = 0x018c, - [CEFCR] = 0x0194, - 
[FRECR] = 0x0198, - [TSFRCR] = 0x019c, - [TLFRCR] = 0x01a0, - [RFCR] = 0x01a4, - [MAFCR] = 0x01a8, - [IPGR] = 0x01b4, - [APR] = 0x01b8, - [MPR] = 0x01bc, - [TPAUSER] = 0x01c4, - [BCFR] = 0x01cc, - - [ARSTR] = 0x0000, - [TSU_CTRST] = 0x0004, - [TSU_FWEN0] = 0x0010, - [TSU_FWEN1] = 0x0014, - [TSU_FCM] = 0x0018, - [TSU_BSYSL0] = 0x0020, - [TSU_BSYSL1] = 0x0024, - [TSU_PRISL0] = 0x0028, - [TSU_PRISL1] = 0x002c, - [TSU_FWSL0] = 0x0030, - [TSU_FWSL1] = 0x0034, - [TSU_FWSLC] = 0x0038, - [TSU_QTAGM0] = 0x0040, - [TSU_QTAGM1] = 0x0044, - [TSU_ADQT0] = 0x0048, - [TSU_ADQT1] = 0x004c, - [TSU_FWSR] = 0x0050, - [TSU_FWINMK] = 0x0054, - [TSU_ADSBSY] = 0x0060, - [TSU_TEN] = 0x0064, - [TSU_POST1] = 0x0070, - [TSU_POST2] = 0x0074, - [TSU_POST3] = 0x0078, - [TSU_POST4] = 0x007c, - - [TXNLCR0] = 0x0080, - [TXALCR0] = 0x0084, - [RXNLCR0] = 0x0088, - [RXALCR0] = 0x008c, - [FWNLCR0] = 0x0090, - [FWALCR0] = 0x0094, - [TXNLCR1] = 0x00a0, - [TXALCR1] = 0x00a0, - [RXNLCR1] = 0x00a8, - [RXALCR1] = 0x00ac, - [FWNLCR1] = 0x00b0, - [FWALCR1] = 0x00b4, - - [TSU_ADRH0] = 0x0100, - [TSU_ADRL0] = 0x0104, - [TSU_ADRL31] = 0x01fc, - -}; - /* Driver's parameters */ #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) #define SH4_SKB_RX_ALIGN 32 @@ -705,7 +486,6 @@ struct sh_eth_private { const u16 *reg_offset; void __iomem *addr; void __iomem *tsu_addr; - struct bb_info *bitbang; u32 num_rx_ring; u32 num_tx_ring; dma_addr_t rx_desc_dma; diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c index 21683e2b1ff4..b6739afeaca1 100644 --- a/drivers/net/ethernet/s6gmac.c +++ b/drivers/net/ethernet/s6gmac.c @@ -998,6 +998,7 @@ static int s6gmac_probe(struct platform_device *pdev) mb = mdiobus_alloc(); if (!mb) { printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); + res = -ENOMEM; goto errmii; } mb->name = "s6gmac_mii"; @@ -1053,20 +1054,7 @@ static struct platform_driver s6gmac_driver = { }, }; -static int __init s6gmac_init(void) -{ - printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n"); - return platform_driver_register(&s6gmac_driver); -} - - -static void __exit s6gmac_exit(void) -{ - platform_driver_unregister(&s6gmac_driver); -} - -module_init(s6gmac_init); -module_exit(s6gmac_exit); +module_platform_driver(s6gmac_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index 3aca57853ed4..bdac936a68bc 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) { skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); received ++; - } else - goto dropping; + } else { + ether3_outw(next_ptr >> 8, REG_RECVEND); + dev->stats.rx_dropped++; + goto done; + } } else { struct net_device_stats *stats = &dev->stats; ether3_outw(next_ptr >> 8, REG_RECVEND); @@ -679,21 +682,6 @@ done: } return maxcnt; - -dropping:{ - static unsigned long last_warned; - - ether3_outw(next_ptr >> 8, REG_RECVEND); - /* - * Don't print this message too many times... 
- */ - if (time_after(jiffies, last_warned + 10 * HZ)) { - last_warned = jiffies; - printk("%s: memory squeeze, dropping packet.\n", dev->name); - } - dev->stats.rx_dropped++; - goto done; - } } /* diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 0fde9ca28269..0ad5694b41f8 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -381,8 +381,6 @@ memory_squeeze: dev->stats.rx_packets++; dev->stats.rx_bytes += len; } else { - printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", - dev->name); dev->stats.rx_dropped++; } } else { diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0bc00991d310..01b99206139a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -22,6 +22,7 @@ #include <linux/topology.h> #include <linux/gfp.h> #include <linux/cpu_rmap.h> +#include <linux/aer.h> #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = { const unsigned int efx_reset_type_max = RESET_TYPE_MAX; const char *const efx_reset_type_names[] = { - [RESET_TYPE_INVISIBLE] = "INVISIBLE", - [RESET_TYPE_ALL] = "ALL", - [RESET_TYPE_WORLD] = "WORLD", - [RESET_TYPE_DISABLE] = "DISABLE", - [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", - [RESET_TYPE_INT_ERROR] = "INT_ERROR", - [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", - [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", - [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", - [RESET_TYPE_TX_SKIP] = "TX_SKIP", - [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", + [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", + [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", }; -#define EFX_MAX_MTU (9 * 1024) - /* Reset workqueue. If any NIC has a hardware failure then a reset will be * queued onto this work queue. This is not a per-nic work queue, because * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. @@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels, static int napi_weight = 64; /* This is the time (in jiffies) between invocations of the hardware - * monitor. On Falcon-based NICs, this will: + * monitor. + * On Falcon-based NICs, this will: * - Check the on-board hardware monitor; * - Poll the link state and reconfigure the hardware as necessary. + * On Siena-based NICs for power systems with EEH support, this will give EEH a + * chance to start. 
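
The realigned reset-name table above relies on C99 designated initializers, so entries stay attached to the right enum value even when new ones such as RECOVER_OR_ALL are inserted mid-sequence, and unnamed slots read back as NULL. In miniature:

#include <stdio.h>

enum reset_type {
	RESET_TYPE_INVISIBLE,
	RESET_TYPE_ALL,
	RESET_TYPE_RECOVER_OR_ALL,
	RESET_TYPE_MAX,
};

static const char *const reset_names[] = {
	[RESET_TYPE_INVISIBLE]      = "INVISIBLE",
	[RESET_TYPE_ALL]            = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
};

static const char *reset_name(enum reset_type t)
{
	if (t >= RESET_TYPE_MAX || !reset_names[t])
		return "(unknown)";
	return reset_names[t];
}

int main(void)
{
	printf("%s\n", reset_name(RESET_TYPE_RECOVER_OR_ALL));
	return 0;
}
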
*/ static unsigned int efx_monitor_interval = 1 * HZ; @@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) static int efx_check_disabled(struct efx_nic *efx) { - if (efx->state == STATE_DISABLED) { + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { netif_err(efx, drv, efx->net_dev, "device is disabled due to earlier errors\n"); return -EIO; @@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget) struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - /* Deliver last RX packet. */ - if (channel->rx_pkt) { - __efx_rx_packet(channel, channel->rx_pkt); - channel->rx_pkt = NULL; - } - if (rx_queue->enabled) { - efx_rx_strategy(channel); + efx_rx_flush_packet(channel); + if (rx_queue->enabled) efx_fast_push_rx_descriptors(rx_queue); - } } return spent; @@ -625,20 +624,51 @@ fail: */ static void efx_start_datapath(struct efx_nic *efx) { + bool old_rx_scatter = efx->rx_scatter; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; struct efx_channel *channel; + size_t rx_buf_len; /* Calculate the rx buffer allocation parameters required to * support the current MTU, including padding for header * alignment and overruns. */ - efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + - efx->type->rx_buffer_hash_size + - efx->type->rx_buffer_padding); - efx->rx_buffer_order = get_order(efx->rx_buffer_len + - sizeof(struct efx_rx_page_state)); + efx->rx_dma_len = (efx->type->rx_buffer_hash_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + + EFX_PAGE_IP_ALIGN + efx->rx_dma_len); + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = false; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > + PAGE_SIZE / 2); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* RX filters also have scatter-enabled flags */ + if (efx->rx_scatter != old_rx_scatter) + efx_filter_update_rx_scatter(efx); /* We must keep at least one descriptor in a TX ring empty. 
* We could avoid this when the queue size does not exactly @@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx) efx_for_each_channel_tx_queue(tx_queue, channel) efx_init_tx_queue(tx_queue); - /* The rx buffer allocation strategy is MTU dependent */ - efx_rx_strategy(channel); - efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); efx_nic_generate_fill_event(rx_queue); } - WARN_ON(channel->rx_pkt != NULL); - efx_rx_strategy(channel); + WARN_ON(channel->rx_pkt_n_frags); } if (netif_device_present(efx->net_dev)) @@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx) BUG_ON(efx->port_enabled); /* Only perform flush if dma is enabled */ - if (dev->is_busmaster) { + if (dev->is_busmaster && efx->state != STATE_RECOVERY) { rc = efx_nic_flush_queues(efx); if (rc && EFX_WORKAROUND_7803(efx)) { @@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx) efx_start_port(efx); efx_start_datapath(efx); - /* Start the hardware monitor if there is one. Otherwise (we're link - * event driven), we have to poll the PHY because after an event queue - * flush, we could have a missed a link state change */ - if (efx->type->monitor != NULL) { + /* Start the hardware monitor if there is one */ + if (efx->type->monitor != NULL) queue_delayed_work(efx->workqueue, &efx->monitor_work, efx_monitor_interval); - } else { + + /* If link state detection is normally event-driven, we have + * to poll now because we could have missed a change + */ + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { mutex_lock(&efx->mac_lock); if (efx->phy_op->poll(efx)) efx_link_status_changed(efx); @@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) out: /* Leave device stopped if necessary */ - disabled = rc || method == RESET_TYPE_DISABLE; + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; rc2 = efx_reset_up(efx, method, !disabled); if (rc2) { disabled = true; @@ -2328,13 +2358,48 @@ out: return rc; } +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +static int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = + of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); + + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. + */ + return 1; + } +#endif + return 0; +} + /* The worker thread exists so that code that cannot sleep can * schedule a reset for later. */ static void efx_reset_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); - unsigned long pending = ACCESS_ONCE(efx->reset_pending); + unsigned long pending; + enum reset_type method; + + pending = ACCESS_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_try_recovery(efx)) + return; if (!pending) return; @@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data) * it cannot change again. 
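
efx_reset_work collects requests as bits in reset_pending and picks fls(pending) - 1; since reset methods are numbered in order of increasing scope, this always services the widest reset anyone has asked for. A userspace stand-in for the kernel's fls() shows the selection:

#include <stdio.h>

/* Highest set bit, 1-based; 0 for no bits, like the kernel helper. */
static int fls_u(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long pending = 0;

	pending |= 1UL << 1;	/* e.g. RESET_TYPE_ALL requested */
	pending |= 1UL << 3;	/* e.g. RESET_TYPE_WORLD, wider scope */

	printf("chosen method = %d\n", fls_u(pending) - 1);	/* -> 3 */
	return 0;
}
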
*/ if (efx->state == STATE_READY) - (void)efx_reset(efx, fls(pending) - 1); + (void)efx_reset(efx, method); rtnl_unlock(); } @@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) { enum reset_type method; + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + switch (type) { case RESET_TYPE_INVISIBLE: case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); @@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_fini_struct(efx); pci_set_drvdata(pci_dev, NULL); free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); }; /* NIC VPD information @@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev, netif_warn(efx, probe, efx->net_dev, "failed to create MTDs (%d)\n", rc); + rc = pci_enable_pcie_error_reporting(pci_dev); + if (rc && rc != -EINVAL) + netif_warn(efx, probe, efx->net_dev, + "pci_enable_pcie_error_reporting failed (%d)\n", rc); + return 0; fail4: @@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = { .restore = efx_pm_resume, }; +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_stop_interrupts(efx, false); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successful reset, which will be performed later in efx_io_resume. */ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + int rc; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + rc = pci_cleanup_aer_uncorrect_error_status(pdev); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); + /* Non-fatal error. Continue. */ + } + + return status; +} + +/* Perform the actual reset and resume I/O operations.
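+ * This runs with the RTNL lock held and performs, via efx_reset(RESET_TYPE_ALL), the reset that efx_io_slot_reset() only faked.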
*/ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a PCI error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callbacks unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +static struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, + .err_handler = &efx_err_handlers, }; /************************************************************************** diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index d2f790df6dcb..8372da239b43 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ +extern void efx_rx_config_page_split(struct efx_nic *efx); extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -extern void efx_rx_strategy(struct efx_channel *channel); extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); extern void efx_rx_slow_fill(unsigned long context); -extern void __efx_rx_packet(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf); -extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, +extern void __efx_rx_packet(struct efx_channel *channel); +extern void efx_rx_packet(struct efx_rx_queue *rx_queue, + unsigned int index, unsigned int n_frags, unsigned int len, u16 flags); +static inline void efx_rx_flush_packet(struct efx_channel *channel) +{ + if (channel->rx_pkt_n_frags) + __efx_rx_packet(channel); +} extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_DMAQ_SIZE 4096UL @@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); extern int efx_probe_filters(struct efx_nic *efx); extern void efx_restore_filters(struct efx_nic *efx); extern void efx_remove_filters(struct efx_nic *efx); +extern void efx_filter_update_rx_scatter(struct efx_nic *efx); extern s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, bool replace); diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 182dbe2cc6e4..ab8fb5889e55 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -137,8 +137,12 @@ enum efx_loopback_mode { * Reset methods are numbered in order of increasing scope.
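* (efx_reset_work() depends on this ordering: it selects fls(reset_pending) - 1, i.e. the widest-scope reset pending.)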
* * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) + * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL + * if unsuccessful. * @RESET_TYPE_ALL: Reset datapath, MAC and PHY * @RESET_TYPE_WORLD: Reset as much as possible + * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if + * unsuccessful. * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog * @RESET_TYPE_INT_ERROR: reset due to internal error @@ -150,9 +154,11 @@ enum efx_loopback_mode { */ enum reset_type { RESET_TYPE_INVISIBLE = 0, - RESET_TYPE_ALL = 1, - RESET_TYPE_WORLD = 2, - RESET_TYPE_DISABLE = 3, + RESET_TYPE_RECOVER_OR_ALL = 1, + RESET_TYPE_ALL = 2, + RESET_TYPE_WORLD = 3, + RESET_TYPE_RECOVER_OR_DISABLE = 4, + RESET_TYPE_DISABLE = 5, RESET_TYPE_MAX_METHOD, RESET_TYPE_TX_WATCHDOG, RESET_TYPE_INT_ERROR, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8e61cd06f66a..6e768175e7e0 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc), }; /* Number of ethtool statistics */ @@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, rule->m_ext.data[1])) return -EINVAL; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 0xfff : rule->ring_cookie); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 49bcd196e10d..4486102fa9b3 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx) static void falcon_init_rx_cfg(struct efx_nic *efx) { - /* Prior to Siena the RX DMA engine will split each frame at - * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to - * be so large that that never happens. */ - const unsigned huge_buf_size = (3 * 4096) >> 5; /* RX control FIFO thresholds (32 entries) */ const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xoff_thr = 25; @@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_RX_CFG); if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { - /* Data FIFO size is 5.5K */ + /* Data FIFO size is 5.5K. The RX DMA engine only + * supports scattering for user-mode queues, but will + * split DMA writes at intervals of RX_USR_BUF_SIZE + * (32-byte units) even for kernel-mode queues. We + * set it to be so large that that never happens. 
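+ * ((3 * 4096) >> 5 is 384 32-byte units, i.e. 12K, which is larger than any frame the 9K maximum MTU allows.)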
+ */ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, - huge_buf_size); + (3 * 4096) >> 5); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); @@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) /* Data FIFO size is 80K; register fields moved */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, - huge_buf_size); + EFX_RX_USR_BUF_SIZE >> 5); /* Send XON and XOFF at ~3 * max MTU away from empty/full */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); @@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_padding = 0x24, + .can_rx_scatter = false, .max_interrupt_mode = EFX_INT_MODE_MSI, .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, @@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, + .can_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 8af42cd1feda..2397f0e8d3eb 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -66,6 +66,10 @@ struct efx_filter_state { #endif }; +static void efx_filter_table_clear_entry(struct efx_nic *efx, + struct efx_filter_table *table, + unsigned int filter_idx); + /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit * key derived from the n-tuple. The initial LFSR state is 0xffff. */ static u16 efx_filter_hash(u32 key) @@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx) filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); } efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); @@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF]; struct efx_filter_spec *spec = &table->spec[filter_idx]; + enum efx_filter_flags flags = 0; + + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. 
+ */ + if (efx->n_rx_channels > 1) + flags |= EFX_FILTER_FLAG_RX_RSS; - efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, - EFX_FILTER_FLAG_RX_RSS, 0); + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + + efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0); spec->type = EFX_FILTER_UC_DEF + filter_idx; table->used_bitmap[0] |= 1 << filter_idx; } @@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) break; } - case EFX_FILTER_TABLE_RX_DEF: - /* One filter spec per type */ - BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); - BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != - EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); - return spec->type - EFX_FILTER_UC_DEF; - case EFX_FILTER_TABLE_RX_MAC: { bool is_wild = spec->type == EFX_FILTER_MAC_WILD; EFX_POPULATE_OWORD_7( @@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, return true; } -static int efx_filter_search(struct efx_filter_table *table, - struct efx_filter_spec *spec, u32 key, - bool for_insert, unsigned int *depth_required) -{ - unsigned hash, incr, filter_idx, depth, depth_max; - - hash = efx_filter_hash(key); - incr = efx_filter_increment(key); - - filter_idx = hash & (table->size - 1); - depth = 1; - depth_max = (for_insert ? - (spec->priority <= EFX_FILTER_PRI_HINT ? - FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) : - table->search_depth[spec->type]); - - for (;;) { - /* Return success if entry is used and matches this spec - * or entry is unused and we are trying to insert. - */ - if (test_bit(filter_idx, table->used_bitmap) ? - efx_filter_equal(spec, &table->spec[filter_idx]) : - for_insert) { - *depth_required = depth; - return filter_idx; - } - - /* Return failure if we reached the maximum search depth */ - if (depth == depth_max) - return for_insert ? -EBUSY : -ENOENT; - - filter_idx = (filter_idx + incr) & (table->size - 1); - ++depth; - } -} - /* * Construct/deconstruct external filter IDs. At least the RX filter * IDs must be ordered by matching priority, for RX NFC semantics. @@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) * efx_filter_insert_filter - add or replace a filter * @efx: NIC in which to insert the filter * @spec: Specification for the filter - * @replace: Flag for whether the specified filter may replace a filter - * with an identical match expression and equal or lower priority + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority * * On success, return the filter ID. * On failure, return a negative error code. + * + * If an existing filter has equal match values to the new filter + * spec, then the new filter might replace it, depending on the + * relative priorities. If the existing filter has lower priority, or + * if @replace_equal is set and it has equal priority, then it is + * replaced. Otherwise the function fails, returning -%EPERM if + * the existing filter has higher priority or -%EEXIST if it has + * equal priority. 
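+ * A minimal usage sketch, assuming @spec has already been initialised
+ * with efx_filter_init_rx() and given its match values:
+ *
+ *	rc = efx_filter_insert_filter(efx, &spec, false);
+ *	if (rc == -EEXIST)
+ *		rc = efx_filter_insert_filter(efx, &spec, true);
+ *
+ * i.e. retry with @replace_equal set when an equal-priority filter with
+ * the same match already exists.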
*/ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, - bool replace) + bool replace_equal) { struct efx_filter_state *state = efx->filter_state; struct efx_filter_table *table = efx_filter_spec_table(state, spec); - struct efx_filter_spec *saved_spec; efx_oword_t filter; - unsigned int filter_idx, depth = 0; - u32 key; + int rep_index, ins_index; + unsigned int depth = 0; int rc; if (!table || table->size == 0) return -EINVAL; - key = efx_filter_build(&filter, spec); - netif_vdbg(efx, hw, efx->net_dev, "%s: type %d search_depth=%d", __func__, spec->type, table->search_depth[spec->type]); - spin_lock_bh(&state->lock); + if (table->id == EFX_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != + EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); + rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; + ins_index = rep_index; - rc = efx_filter_search(table, spec, key, true, &depth); - if (rc < 0) - goto out; - filter_idx = rc; - BUG_ON(filter_idx >= table->size); - saved_spec = &table->spec[filter_idx]; - - if (test_bit(filter_idx, table->used_bitmap)) { - /* Should we replace the existing filter? */ - if (!replace) { + spin_lock_bh(&state->lock); + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). + * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_filter_build(&filter, spec); + unsigned int hash = efx_filter_hash(key); + unsigned int incr = efx_filter_increment(key); + unsigned int max_rep_depth = table->search_depth[spec->type]; + unsigned int max_ins_depth = + spec->priority <= EFX_FILTER_PRI_HINT ? 
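+ /* hint-priority filters only merit a shallow insertion search */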
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + spin_lock_bh(&state->lock); + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_filter_equal(spec, &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_filter_spec *saved_spec = &table->spec[rep_index]; + + if (spec->priority == saved_spec->priority && !replace_equal) { rc = -EEXIST; goto out; } @@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, rc = -EPERM; goto out; } - } else { - __set_bit(filter_idx, table->used_bitmap); + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); ++table->used; } - *saved_spec = *spec; + table->spec[ins_index] = *spec; if (table->id == EFX_FILTER_TABLE_RX_DEF) { efx_filter_push_rx_config(efx); @@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, } efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_filter_table_clear_entry(efx, table, rep_index); } netif_vdbg(efx, hw, efx->net_dev, "%s: filter type %d index %d rxq %u set", - __func__, spec->type, filter_idx, spec->dmaq_id); - rc = efx_filter_make_id(spec, filter_idx); + __func__, spec->type, ins_index, spec->dmaq_id); + rc = efx_filter_make_id(spec, ins_index); out: spin_unlock_bh(&state->lock); @@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx) kfree(state); } +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_filter_state *state = efx->filter_state; + enum efx_filter_table_id table_id; + struct efx_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&state->lock); + + for (table_id = EFX_FILTER_TABLE_RX_IP; + table_id <= EFX_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FILTER_TABLE_RX_DEF) + /* Pushed by efx_filter_push_rx_config() */ + continue; + + efx_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_filter_push_rx_config(efx); + + spin_unlock_bh(&state->lock); +} + #ifdef CONFIG_RFS_ACCEL int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 9d426d0457bd..c5c9747861ba 100644 --- 
a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -553,6 +553,7 @@ #define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ #define MC_CMD_PTP_MODE_V2 0x2 /* enum */ #define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */ /* MC_CMD_PTP_IN_DISABLE msgrequest */ #define MC_CMD_PTP_IN_DISABLE_LEN 8 diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 0a90abd2421b..9bd433a095c5 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -69,6 +69,12 @@ #define EFX_TXQ_TYPES 4 #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) +/* Maximum possible MTU the driver supports */ +#define EFX_MAX_MTU (9 * 1024) + +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */ +#define EFX_RX_USR_BUF_SIZE 1824 + /* Forward declare Precision Time Protocol (PTP) support structure. */ struct efx_ptp_data; @@ -206,25 +212,23 @@ struct efx_tx_queue { /** * struct efx_rx_buffer - An Efx RX data buffer * @dma_addr: DMA base address of the buffer - * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE). - * Will be %NULL if the buffer slot is currently free. - * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE. + * @page: The associated page buffer. * Will be %NULL if the buffer slot is currently free. - * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. - * @len: Buffer length, in bytes. - * @flags: Flags for buffer and packet state. + * @page_offset: If pending: offset in @page of DMA base address. + * If completed: offset in @page of Ethernet header. + * @len: If pending: length for DMA descriptor. + * If completed: received length, excluding hash prefix. + * @flags: Flags for buffer and packet state. These are only set on the + * first buffer of a scattered packet. */ struct efx_rx_buffer { dma_addr_t dma_addr; - union { - struct sk_buff *skb; - struct page *page; - } u; + struct page *page; u16 page_offset; u16 len; u16 flags; }; -#define EFX_RX_BUF_PAGE 0x0001 +#define EFX_RX_BUF_LAST_IN_PAGE 0x0001 #define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_DISCARD 0x0004 @@ -260,14 +264,23 @@ struct efx_rx_page_state { * @added_count: Number of buffers added to the receive queue. * @notified_count: Number of buffers given to NIC (<= @added_count). * @removed_count: Number of buffers removed from the receive queue. + * @scatter_n: Number of buffers used by current packet + * @page_ring: The ring to store DMA mapped pages for reuse. + * @page_add: Counter to calculate the write pointer for the recycle ring. + * @page_remove: Counter to calculate the read pointer for the recycle ring. + * @page_recycle_count: The number of pages that have been recycled. + * @page_recycle_failed: The number of pages that couldn't be recycled because + * the kernel still held a reference to them. + * @page_recycle_full: The number of pages that were released because the + * recycle ring was full. + * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. * @max_fill: RX descriptor maximum fill level (<= ring size) * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill * (<= @max_fill) * @min_fill: RX descriptor minimum non-zero fill level. * This records the minimum fill level observed when a ring * refill was triggered. - * @alloc_page_count: RX allocation strategy counter. - * @alloc_skb_count: RX allocation strategy counter. 
+ * @recycle_count: RX buffer recycle counter. * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). */ struct efx_rx_queue { @@ -279,15 +292,22 @@ struct efx_rx_queue { bool enabled; bool flush_pending; - int added_count; - int notified_count; - int removed_count; + unsigned int added_count; + unsigned int notified_count; + unsigned int removed_count; + unsigned int scatter_n; + struct page **page_ring; + unsigned int page_add; + unsigned int page_remove; + unsigned int page_recycle_count; + unsigned int page_recycle_failed; + unsigned int page_recycle_full; + unsigned int page_ptr_mask; unsigned int max_fill; unsigned int fast_fill_trigger; unsigned int min_fill; unsigned int min_overfill; - unsigned int alloc_page_count; - unsigned int alloc_skb_count; + unsigned int recycle_count; struct timer_list slow_fill; unsigned int slow_fill_count; }; @@ -336,10 +356,6 @@ enum efx_rx_alloc_method { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score - * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors - * and diagnostic counters - * @rx_alloc_push_pages: RX allocation method currently in use for pushing - * descriptors * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors @@ -347,6 +363,12 @@ enum efx_rx_alloc_method { * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors * @n_rx_overlength: Count of RX_OVERLENGTH errors * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun + * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to + * lack of descriptors + * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by + * __efx_rx_packet(), or zero if there is none + * @rx_pkt_index: Ring index of first buffer for next packet to be delivered + * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel */ @@ -371,9 +393,6 @@ struct efx_channel { unsigned int rfs_filters_added; #endif - int rx_alloc_level; - int rx_alloc_push_pages; - unsigned n_rx_tobe_disc; unsigned n_rx_ip_hdr_chksum_err; unsigned n_rx_tcp_udp_chksum_err; @@ -381,11 +400,10 @@ struct efx_channel { unsigned n_rx_frm_trunc; unsigned n_rx_overlength; unsigned n_skbuff_leaks; + unsigned int n_rx_nodesc_trunc; - /* Used to pipeline received packets in order to optimise memory - * access with prefetches. 
- */ - struct efx_rx_buffer *rx_pkt; + unsigned int rx_pkt_n_frags; + unsigned int rx_pkt_index; struct efx_rx_queue rx_queue; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; @@ -410,7 +428,7 @@ struct efx_channel_type { void (*post_remove)(struct efx_channel *); void (*get_name)(struct efx_channel *, char *buf, size_t len); struct efx_channel *(*copy)(const struct efx_channel *); - void (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool (*receive_skb)(struct efx_channel *, struct sk_buff *); bool keep_eventq; }; @@ -446,6 +464,7 @@ enum nic_state { STATE_UNINIT = 0, /* device being probed/removed or is frozen */ STATE_READY = 1, /* hardware ready and netdev registered */ STATE_DISABLED = 2, /* device disabled due to hardware errors */ + STATE_RECOVERY = 3, /* device recovering from PCI error */ }; /* @@ -684,10 +703,13 @@ struct vfdi_status; * @n_channels: Number of channels in use * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX - * @rx_buffer_len: RX buffer length + * @rx_dma_len: Current maximum RX DMA length * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_buffer_truesize: Amortised allocation size of an RX buffer, + * for use in sk_buff::truesize * @rx_hash_key: Toeplitz hash key for RSS * @rx_indir_table: Indirection table for RSS + * @rx_scatter: Scatter mode enabled for receives * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired * @irq_status: Interrupt status buffer @@ -800,10 +822,15 @@ struct efx_nic { unsigned rss_spread; unsigned tx_channel_offset; unsigned n_tx_channels; - unsigned int rx_buffer_len; + unsigned int rx_dma_len; unsigned int rx_buffer_order; + unsigned int rx_buffer_truesize; + unsigned int rx_page_buf_step; + unsigned int rx_bufs_per_page; + unsigned int rx_pages_per_batch; u8 rx_hash_key[40]; u32 rx_indir_table[128]; + bool rx_scatter; unsigned int_error_count; unsigned long int_error_expire; @@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @evq_ptr_tbl_base: Event queue pointer table base address * @evq_rptr_tbl_base: Event queue read-pointer table base address * @max_dma_mask: Maximum possible DMA mask - * @rx_buffer_hash_size: Size of hash at start of RX buffer - * @rx_buffer_padding: Size of padding at end of RX buffer + * @rx_buffer_hash_size: Size of hash at start of RX packet + * @rx_buffer_padding: Size of padding at end of RX packet + * @can_rx_scatter: NIC is able to scatter packet to multiple buffers * @max_interrupt_mode: Highest capability interrupt mode supported * from &enum efx_init_mode. 
* @phys_addr_channels: Number of channels with physically addressed @@ -983,6 +1011,7 @@ struct efx_nic_type { u64 max_dma_mask; unsigned int rx_buffer_hash_size; unsigned int rx_buffer_padding; + bool can_rx_scatter; unsigned int max_interrupt_mode; unsigned int phys_addr_channels; unsigned int timer_period_max; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index eaa8e874a3cb..b0503cd8c2a0 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len) { buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, - &buffer->dma_addr, GFP_ATOMIC); + &buffer->dma_addr, + GFP_ATOMIC | __GFP_ZERO); if (!buffer->addr) return -ENOMEM; buffer->len = len; - memset(buffer->addr, 0, len); return 0; } @@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; bool iscsi_digest_en = is_b0; + bool jumbo_en; + + /* For kernel-mode queues in Falcon A1, the JUMBO flag enables + * DMA to continue after a PCIe page boundary (and scattering + * is not possible). In Falcon B0 and Siena, it enables + * scatter. + */ + jumbo_en = !is_b0 || efx->rx_scatter; netif_dbg(efx, hw, efx->net_dev, "RX queue %d ring in special buffers %d-%d\n", efx_rx_queue_index(rx_queue), rx_queue->rxd.index, rx_queue->rxd.index + rx_queue->rxd.entries - 1); + rx_queue->scatter_n = 0; + /* Pin RX descriptor ring */ efx_init_special_buffer(efx, &rx_queue->rxd); @@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) FRF_AZ_RX_DESCQ_SIZE, __ffs(rx_queue->rxd.entries), FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , - /* For >=B0 this is scatter so disable */ - FRF_AZ_RX_DESCQ_JUMBO, !is_b0, + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, FRF_AZ_RX_DESCQ_EN, 1); efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, efx_rx_queue_index(rx_queue)); @@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, EFX_RX_PKT_DISCARD : 0; } -/* Handle receive events that are not in-order. */ -static void +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. + */ +static bool efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) { + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_nic *efx = rx_queue->efx; unsigned expected, dropped; + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + expected = rx_queue->removed_count & rx_queue->ptr_mask; dropped = (index - expected) & rx_queue->ptr_mask; netif_info(efx, rx_err, efx->net_dev, @@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return false; } /* Handle a packet received event @@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; unsigned expected_ptr; - bool rx_ev_pkt_ok; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; u16 flags; struct efx_rx_queue *rx_queue; struct efx_nic *efx = channel->efx; @@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) if (unlikely(ACCESS_ONCE(efx->reset_pending))) return; - /* Basic packet information */ - rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); - rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); - rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != channel->channel); rx_queue = efx_channel_get_rx_queue(channel); rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); - expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; - if (unlikely(rx_ev_desc_ptr != expected_ptr)) - efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); if (likely(rx_ev_pkt_ok)) { /* If packet is marked as OK and packet type is TCP/IP or @@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) channel->irq_mod_score += 2; /* Handle received packet */ - efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; } /* If this flush done event corresponds to a &struct efx_tx_queue, then diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 3f93624fc273..07f6baa15c0c 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -99,6 +99,9 @@ #define PTP_V2_VERSION_LENGTH 1 #define PTP_V2_VERSION_OFFSET 29 +#define PTP_V2_UUID_LENGTH 8 +#define PTP_V2_UUID_OFFSET 48 + /* Although PTP V2 
UUIDs are comprised of a ClockIdentity (8) and PortNumber (2), * the MC only captures the last six bytes of the clock identity. These values * reflect those, not the ones used in the standard. The standard permits @@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, unsigned number_readings = (response_length / MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); unsigned i; - unsigned min; - unsigned min_set = 0; unsigned total; unsigned ngood = 0; unsigned last_good = 0; struct efx_ptp_data *ptp = efx->ptp_data; - bool min_valid = false; u32 last_sec; u32 start_sec; struct timespec delta; @@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, if (number_readings == 0) return -EAGAIN; - /* Find minimum value in this set of results, discarding clearly - * erroneous results. + /* Read the set of results and increment stats for any results that + * appear to be erroneous. */ for (i = 0; i < number_readings; i++) { efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; - if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) { - if (min_valid) { - if (ptp->timeset[i].window < min_set) - min_set = ptp->timeset[i].window; - } else { - min_valid = true; - min_set = ptp->timeset[i].window; - } - } - } - - if (min_valid) { - if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns)) - min = ptp->base_sync_ns; - else - min = min_set; - } else { - min = SYNCHRONISATION_GRANULARITY_NS; } - /* Discard excessively long synchronise durations. The MC times - * when it finishes reading the host time so the corrected window - * time should be fairly constant for a given platform. + /* Find the last good host-MC synchronization result. The MC times + * when it finishes reading the host time so the corrected window time + * should be fairly constant for a given platform. */ total = 0; for (i = 0; i < number_readings; i++) @@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, if (ngood == 0) { netif_warn(efx, drv, efx->net_dev, - "PTP no suitable synchronisations %dns %dns\n", - ptp->base_sync_ns, min_set); + "PTP no suitable synchronisations %dns\n", + ptp->base_sync_ns); return -EAGAIN; } @@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) * the receive timestamp from the MC - this will probably occur after the * packet arrival because of the processing in the MC. */ -static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) { struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp = efx->ptp_data; struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; - u8 *data; + u8 *match_data_012, *match_data_345; unsigned int version; match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); /* Correct version?
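 * (PTP v1 carries a 16-bit version field; v2 uses a single byte at a different offset, hence the two paths below.)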
*/ if (ptp->mode == MC_CMD_PTP_MODE_V1) { - if (skb->len < PTP_V1_MIN_LENGTH) { - netif_receive_skb(skb); - return; + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { + return false; } version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); if (version != PTP_VERSION_V1) { - netif_receive_skb(skb); - return; + return false; } + + /* PTP V1 uses all six bytes of the UUID to match the packet + * to the timestamp + */ + match_data_012 = skb->data + PTP_V1_UUID_OFFSET; + match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3; } else { - if (skb->len < PTP_V2_MIN_LENGTH) { - netif_receive_skb(skb); - return; + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { + return false; } version = skb->data[PTP_V2_VERSION_OFFSET]; - - BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2); - BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET); - BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH); - BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); - BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); - if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { - netif_receive_skb(skb); - return; + return false; + } + + /* The original V2 implementation uses bytes 2-7 of + * the UUID to match the packet to the timestamp. This + * discards two of the bytes of the MAC address used + * to create the UUID (SF bug 33070). The PTP V2 + * enhanced mode fixes this issue and uses bytes 0-2 + * and byte 5-7 of the UUID. + */ + match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5; + if (ptp->mode == MC_CMD_PTP_MODE_V2) { + match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2; + } else { + match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0; + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); } } @@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) timestamps = skb_hwtstamps(skb); memset(timestamps, 0, sizeof(*timestamps)); + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + /* Extract UUID/Sequence information */ - data = skb->data + PTP_V1_UUID_OFFSET; - match->words[0] = (data[0] | - (data[1] << 8) | - (data[2] << 16) | - (data[3] << 24)); - match->words[1] = (data[4] | - (data[5] << 8) | + match->words[0] = (match_data_012[0] | + (match_data_012[1] << 8) | + (match_data_012[2] << 16) | + (match_data_345[0] << 24)); + match->words[1] = (match_data_345[1] | + (match_data_345[2] << 8) | (skb->data[PTP_V1_SEQUENCE_OFFSET + PTP_V1_SEQUENCE_LENGTH - 1] << 16)); @@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) skb_queue_tail(&ptp->rxq, skb); queue_work(ptp->workwq, &ptp->work); + + return true; } /* Transmit a PTP packet. This has to be transmitted by the MC @@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) * timestamped */ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - new_mode = MC_CMD_PTP_MODE_V2; + new_mode = MC_CMD_PTP_MODE_V2_ENHANCED; enable_wanted = true; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: @@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) if (init->tx_type != HWTSTAMP_TX_OFF) enable_wanted = true; + /* Old versions of the firmware do not support the improved + * UUID filtering option (SF bug 33070). If the firmware does + * not accept the enhanced mode, fall back to the standard PTP + * v2 UUID filtering. 
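+ * (efx_ptp_change_mode() returns non-zero if the firmware refuses the requested mode, which is what the retry below keys off.)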
+ */ rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); + if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED)) + rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index bb579a6128c8..e73e30bac10e 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -16,6 +16,7 @@ #include <linux/udp.h> #include <linux/prefetch.h> #include <linux/moduleparam.h> +#include <linux/iommu.h> #include <net/ip.h> #include <net/checksum.h> #include "net_driver.h" @@ -24,85 +25,39 @@ #include "selftest.h" #include "workarounds.h" -/* Number of RX descriptors pushed at once. */ -#define EFX_RX_BATCH 8 +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U -/* Maximum size of a buffer sharing a page */ -#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state)) +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. + */ +#define EFX_RECYCLE_RING_SIZE_IOMMU 4096 +#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) /* Size of buffer allocated for skb header area. */ #define EFX_SKB_HEADERS 64u -/* - * rx_alloc_method - RX buffer allocation method - * - * This driver supports two methods for allocating and using RX buffers: - * each RX buffer may be backed by an skb or by an order-n page. - * - * When GRO is in use then the second method has a lower overhead, - * since we don't have to allocate then free skbs on reassembled frames. - * - * Values: - * - RX_ALLOC_METHOD_AUTO = 0 - * - RX_ALLOC_METHOD_SKB = 1 - * - RX_ALLOC_METHOD_PAGE = 2 - * - * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count - * controlled by the parameters below. - * - * - Since pushing and popping descriptors are separated by the rx_queue - * size, so the watermarks should be ~rxd_size. - * - The performance win by using page-based allocation for GRO is less - * than the performance hit of using page-based allocation of non-GRO, - * so the watermarks should reflect this. - * - * Per channel we maintain a single variable, updated by each channel: - * - * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO : - * RX_ALLOC_FACTOR_SKB) - * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which - * limits the hysteresis), and update the allocation strategy: - * - * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ? - * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) - */ -static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; - -#define RX_ALLOC_LEVEL_GRO 0x2000 -#define RX_ALLOC_LEVEL_MAX 0x3000 -#define RX_ALLOC_FACTOR_GRO 1 -#define RX_ALLOC_FACTOR_SKB (-2) - /* This is the percentage fill level below which new RX descriptors * will be added to the RX descriptor ring. */ static unsigned int rx_refill_threshold; +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + /* * RX maximum head room required. * - * This must be at least 1 to prevent overflow and at least 2 to allow - * pipelined receives. + * This must be at least 1 to prevent overflow, plus one packet-worth + * to allow pipelined receives. 
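 * (With the 9K maximum MTU and 1824-byte scatter buffers, EFX_RX_MAX_FRAGS comes to roughly six, so about seven descriptors of head room.)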
*/ -#define EFX_RXD_HEAD_ROOM 2 +#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) -/* Offset of ethernet header within page */ -static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, - struct efx_rx_buffer *buf) +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) { - return buf->page_offset + efx->type->rx_buffer_hash_size; -} -static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) -{ - return PAGE_SIZE << efx->rx_buffer_order; -} - -static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) -{ - if (buf->flags & EFX_RX_BUF_PAGE) - return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); - else - return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size; + return page_address(buf->page) + buf->page_offset; } static inline u32 efx_rx_buf_hash(const u8 *eh) @@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh) #endif } -/** - * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers - * - * @rx_queue: Efx RX queue - * - * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a - * struct efx_rx_buffer for each one. Return a negative error code or 0 - * on success. May fail having only inserted fewer than EFX_RX_BATCH - * buffers. - */ -static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) +static inline struct efx_rx_buffer * +efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) +{ + if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) + return efx_rx_buffer(rx_queue, 0); + else + return rx_buf + 1; +} + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + +void efx_rx_config_page_split(struct efx_nic *efx) +{ + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, + L1_CACHE_BYTES); + efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : + ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / + efx->rx_page_buf_step); + efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / + efx->rx_bufs_per_page; + efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, + efx->rx_bufs_per_page); +} + +/* Check the RX page recycle ring for a page that can be reused. */ +static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; - struct net_device *net_dev = efx->net_dev; - struct efx_rx_buffer *rx_buf; - struct sk_buff *skb; - int skb_len = efx->rx_buffer_len; - unsigned index, count; + struct page *page; + struct efx_rx_page_state *state; + unsigned index; - for (count = 0; count < EFX_RX_BATCH; ++count) { - index = rx_queue->added_count & rx_queue->ptr_mask; - rx_buf = efx_rx_buffer(rx_queue, index); - - rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len); - if (unlikely(!skb)) - return -ENOMEM; - - /* Adjust the SKB for padding */ - skb_reserve(skb, NET_IP_ALIGN); - rx_buf->len = skb_len - NET_IP_ALIGN; - rx_buf->flags = 0; - - rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev, - skb->data, rx_buf->len, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&efx->pci_dev->dev, - rx_buf->dma_addr))) { - dev_kfree_skb_any(skb); - rx_buf->u.skb = NULL; - return -EIO; - } + index = rx_queue->page_remove & rx_queue->page_ptr_mask; + page = rx_queue->page_ring[index]; + if (page == NULL) + return NULL; + + rx_queue->page_ring[index] = NULL; + /* page_remove cannot exceed page_add. 
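+ * (Both are free-running counters; they are reduced modulo the ring size, via page_ptr_mask, only when used as indices.)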
*/ + if (rx_queue->page_remove != rx_queue->page_add) + ++rx_queue->page_remove; - ++rx_queue->added_count; - ++rx_queue->alloc_skb_count; + /* If page_count is 1 then we hold the only reference to this page. */ + if (page_count(page) == 1) { + ++rx_queue->page_recycle_count; + return page; + } else { + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + ++rx_queue->page_recycle_failed; } - return 0; + return NULL; } /** - * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers + * efx_init_rx_buffers - create a batch of page-based RX buffers * * @rx_queue: Efx RX queue * - * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA, - * and populates struct efx_rx_buffers for each one. Return a negative error - * code or 0 on success. If a single page can be split between two buffers, - * then the page will either be inserted fully, or not at at all. + * This allocates a batch of pages, maps them for DMA, and populates + * struct efx_rx_buffers for each one. Return a negative error code or + * 0 on success. If a single page can be used for multiple buffers, + * then the page will either be inserted fully, or not at all. */ -static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; @@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) dma_addr_t dma_addr; unsigned index, count; - /* We can split a page between two buffers */ - BUILD_BUG_ON(EFX_RX_BATCH & 1); - - for (count = 0; count < EFX_RX_BATCH; ++count) { - page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, - efx->rx_buffer_order); - if (unlikely(page == NULL)) - return -ENOMEM; - dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0, - efx_rx_buf_size(efx), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) { - __free_pages(page, efx->rx_buffer_order); - return -EIO; + count = 0; + do { + page = efx_reuse_page(rx_queue); + if (page == NULL) { + page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, + efx->rx_buffer_order); + if (unlikely(page == NULL)) + return -ENOMEM; + dma_addr = + dma_map_page(&efx->pci_dev->dev, page, 0, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&efx->pci_dev->dev, + dma_addr))) { + __free_pages(page, efx->rx_buffer_order); + return -EIO; + } + state = page_address(page); + state->dma_addr = dma_addr; + } else { + state = page_address(page); + dma_addr = state->dma_addr; } - state = page_address(page); - state->refcnt = 0; - state->dma_addr = dma_addr; dma_addr += sizeof(struct efx_rx_page_state); page_offset = sizeof(struct efx_rx_page_state); - split: - index = rx_queue->added_count & rx_queue->ptr_mask; - rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; - rx_buf->u.page = page; - rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; - rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; - rx_buf->flags = EFX_RX_BUF_PAGE; - ++rx_queue->added_count; - ++rx_queue->alloc_page_count; - ++state->refcnt; - - if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) { - /* Use the second half of the page */ + do { + index = rx_queue->added_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; +
rx_buf->page = page; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; + rx_buf->len = efx->rx_dma_len; + rx_buf->flags = 0; + ++rx_queue->added_count; get_page(page); - dma_addr += (PAGE_SIZE >> 1); - page_offset += (PAGE_SIZE >> 1); - ++count; - goto split; - } - } + dma_addr += efx->rx_page_buf_step; + page_offset += efx->rx_page_buf_step; + } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); + + rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; + } while (++count < efx->rx_pages_per_batch); return 0; } +/* Unmap a DMA-mapped page. This function is only called for the final RX + * buffer in a page. + */ static void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf, - unsigned int used_len) + struct efx_rx_buffer *rx_buf) { - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { - struct efx_rx_page_state *state; - - state = page_address(rx_buf->u.page); - if (--state->refcnt == 0) { - dma_unmap_page(&efx->pci_dev->dev, - state->dma_addr, - efx_rx_buf_size(efx), - DMA_FROM_DEVICE); - } else if (used_len) { - dma_sync_single_for_cpu(&efx->pci_dev->dev, - rx_buf->dma_addr, used_len, - DMA_FROM_DEVICE); - } - } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { - dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr, - rx_buf->len, DMA_FROM_DEVICE); + struct page *page = rx_buf->page; + + if (page) { + struct efx_rx_page_state *state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, + state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); } } -static void efx_free_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf) +static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) { - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { - __free_pages(rx_buf->u.page, efx->rx_buffer_order); - rx_buf->u.page = NULL; - } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { - dev_kfree_skb_any(rx_buf->u.skb); - rx_buf->u.skb = NULL; + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; } } -static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) +/* Attempt to recycle the page if there is an RX recycle ring; the page can + * only be added if this is the final RX buffer, to prevent pages being used in + * the descriptor ring and appearing in the recycle ring simultaneously. + */ +static void efx_recycle_rx_page(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) { - efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); - efx_free_rx_buffer(rx_queue->efx, rx_buf); -} + struct page *page = rx_buf->page; + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = rx_queue->efx; + unsigned index; -/* Attempt to resurrect the other receive buffer that used to share this page, - * which had previously been passed up to the kernel and freed. */ -static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf) -{ - struct efx_rx_page_state *state = page_address(rx_buf->u.page); - struct efx_rx_buffer *new_buf; - unsigned fill_level, index; - - /* +1 because efx_rx_packet() incremented removed_count. +1 because - * we'd like to insert an additional descriptor whilst leaving - * EFX_RXD_HEAD_ROOM for the non-recycle path */ - fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); - if (unlikely(fill_level > rx_queue->max_fill)) { - /* We could place "state" on a list, and drain the list in - * efx_fast_push_rx_descriptors(). For now, this will do. 
*/ + /* Only recycle the page after processing the final buffer. */ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) return; - } - ++state->refcnt; - get_page(rx_buf->u.page); + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + unsigned read_index = rx_queue->page_remove & + rx_queue->page_ptr_mask; - index = rx_queue->added_count & rx_queue->ptr_mask; - new_buf = efx_rx_buffer(rx_queue, index); - new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); - new_buf->u.page = rx_buf->u.page; - new_buf->len = rx_buf->len; - new_buf->flags = EFX_RX_BUF_PAGE; - ++rx_queue->added_count; + /* The next slot in the recycle ring is available, but + * increment page_remove if the read pointer currently + * points here. + */ + if (read_index == index) + ++rx_queue->page_remove; + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); } -/* Recycle the given rx buffer directly back into the rx_queue. There is - * always room to add this buffer, because we've just popped a buffer. */ -static void efx_recycle_rx_buffer(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf) +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) { - struct efx_nic *efx = channel->efx; - struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - struct efx_rx_buffer *new_buf; - unsigned index; - - rx_buf->flags &= EFX_RX_BUF_PAGE; - - if ((rx_buf->flags & EFX_RX_BUF_PAGE) && - efx->rx_buffer_len <= EFX_RX_HALF_PAGE && - page_count(rx_buf->u.page) == 1) - efx_resurrect_rx_buffer(rx_queue, rx_buf); + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); + + /* If this is the last buffer in a page, unmap and free it. */ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_free_rx_buffer(rx_buf); + } + rx_buf->page = NULL; +} - index = rx_queue->added_count & rx_queue->ptr_mask; - new_buf = efx_rx_buffer(rx_queue, index); +/* Recycle the pages that are used by buffers that have just been received. 
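+ * Each of the n_frags buffers is visited in descriptor-ring order via
+ * efx_rx_buf_next(); only the buffer flagged EFX_RX_BUF_LAST_IN_PAGE can
+ * actually push its page onto the recycle ring.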
*/ +static void efx_recycle_rx_buffers(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - memcpy(new_buf, rx_buf, sizeof(*new_buf)); - rx_buf->u.page = NULL; - ++rx_queue->added_count; + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); } /** @@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, */ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) { - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - unsigned fill_level; + struct efx_nic *efx = rx_queue->efx; + unsigned int fill_level, batch_size; int space, rc = 0; /* Calculate current fill level, and exit if we don't need to fill */ @@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) rx_queue->min_fill = fill_level; } + batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; space = rx_queue->max_fill - fill_level; - EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH); + EFX_BUG_ON_PARANOID(space < batch_size); netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, "RX queue %d fast-filling descriptor ring from" - " level %d to level %d using %s allocation\n", + " level %d to level %d\n", efx_rx_queue_index(rx_queue), fill_level, - rx_queue->max_fill, - channel->rx_alloc_push_pages ? "page" : "skb"); + rx_queue->max_fill); + do { - if (channel->rx_alloc_push_pages) - rc = efx_init_rx_buffers_page(rx_queue); - else - rc = efx_init_rx_buffers_skb(rx_queue); + rc = efx_init_rx_buffers(rx_queue); if (unlikely(rc)) { /* Ensure that we don't leave the rx queue empty */ if (rx_queue->added_count == rx_queue->removed_count) efx_schedule_slow_fill(rx_queue); goto out; } - } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); + } while ((space -= batch_size) >= batch_size); netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, "RX queue %d fast-filled descriptor ring " @@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context) static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, - int len, bool *leak_packet) + int len) { struct efx_nic *efx = rx_queue->efx; unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; @@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, "RX event (0x%x > 0x%x+0x%x). Leaking\n", efx_rx_queue_index(rx_queue), len, max_len, efx->type->rx_buffer_padding); - /* If this buffer was skb-allocated, then the meta - * data at the end of the skb will be trashed. So - * we have no choice but to leak the fragment. - */ - *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE); efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); } else { if (net_ratelimit()) @@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, /* Pass a received packet up through GRO. GRO can handle pages * regardless of checksum state and skbs with a good checksum. 
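 * Each fragment is attached to the skb obtained from napi_get_frags()
 * using skb_fill_page_desc(), and the assembled packet is then handed
 * to napi_gro_frags() for coalescing.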
*/ -static void efx_rx_packet_gro(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - const u8 *eh) +static void +efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh) { struct napi_struct *napi = &channel->napi_str; gro_result_t gro_result; + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; - if (rx_buf->flags & EFX_RX_BUF_PAGE) { - struct efx_nic *efx = channel->efx; - struct page *page = rx_buf->u.page; - struct sk_buff *skb; + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + while (n_frags--) { + put_page(rx_buf->page); + rx_buf->page = NULL; + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + return; + } - rx_buf->u.page = NULL; + if (efx->net_dev->features & NETIF_F_RXHASH) + skb->rxhash = efx_rx_buf_hash(eh); + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? + CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } - skb = napi_get_frags(napi); - if (!skb) { - put_page(page); - return; - } + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + gro_result = napi_gro_frags(napi); + if (gro_result != GRO_DROP) + channel->irq_mod_score += 2; +} - if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(eh); +/* Allocate and construct an SKB around page fragments */ +static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, + u8 *eh, int hdr_len) +{ + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; - skb_fill_page_desc(skb, 0, page, - efx_rx_buf_offset(efx, rx_buf), rx_buf->len); + /* Allocate an SKB to store the headers */ + skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); + if (unlikely(skb == NULL)) + return NULL; - skb->len = rx_buf->len; - skb->data_len = rx_buf->len; - skb->truesize += rx_buf->len; - skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); - skb_record_rx_queue(skb, channel->rx_queue.core_index); + skb_reserve(skb, EFX_PAGE_SKB_ALIGN); + memcpy(__skb_put(skb, hdr_len), eh, hdr_len); - gro_result = napi_gro_frags(napi); - } else { - struct sk_buff *skb = rx_buf->u.skb; + /* Append the remaining page(s) onto the frag list */ + if (rx_buf->len > hdr_len) { + rx_buf->page_offset += hdr_len; + rx_buf->len -= hdr_len; - EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED)); - rx_buf->u.skb = NULL; - skb->ip_summed = CHECKSUM_UNNECESSARY; + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + skb->data_len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; - gro_result = napi_gro_receive(napi, skb); + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + } else { + __free_pages(rx_buf->page, efx->rx_buffer_order); + rx_buf->page = NULL; + n_frags = 0; } - if (gro_result == GRO_NORMAL) { - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; - } else if (gro_result != GRO_DROP) { - channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO; - channel->irq_mod_score += 2; - } + skb->truesize += n_frags * efx->rx_buffer_truesize; + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efx->net_dev); + + return skb; } void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, - unsigned int len, u16 flags) + unsigned int n_frags, unsigned int len, u16 flags) { struct efx_nic *efx = rx_queue->efx; struct efx_channel *channel = efx_rx_queue_channel(rx_queue); struct efx_rx_buffer *rx_buf; - bool leak_packet = false; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->flags |= flags; - /* This allows the refill path to post another buffer. - * EFX_RXD_HEAD_ROOM ensures that the slot we are using - * isn't overwritten yet. - */ - rx_queue->removed_count++; - - /* Validate the length encoded in the event vs the descriptor pushed */ - efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet); + /* Validate the number of fragments and completed length */ + if (n_frags == 1) { + efx_rx_packet__check_len(rx_queue, rx_buf, len); + } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || + unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) || + unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) || + unlikely(!efx->rx_scatter)) { + /* If this isn't an explicit discard request, either + * the hardware or the driver is broken. + */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } netif_vdbg(efx, rx_status, efx->net_dev, - "RX queue %d received id %x at %llx+%x %s%s\n", + "RX queue %d received ids %x-%x len %d %s%s\n", efx_rx_queue_index(rx_queue), index, - (unsigned long long)rx_buf->dma_addr, len, + (index + n_frags - 1) & rx_queue->ptr_mask, len, (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); - /* Discard packet, if instructed to do so */ + /* Discard packet, if instructed to do so. Process the + * previous receive first. 
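+ * efx_rx_flush_packet() delivers any packet still held by the channel
+ * before the discarded buffers are recycled, keeping completions ordered.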
+ */ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { - if (unlikely(leak_packet)) - channel->n_skbuff_leaks++; - else - efx_recycle_rx_buffer(channel, rx_buf); - - /* Don't hold off the previous receive */ - rx_buf = NULL; - goto out; + efx_rx_flush_packet(channel); + put_page(rx_buf->page); + efx_recycle_rx_buffers(channel, rx_buf, n_frags); + return; } - /* Release and/or sync DMA mapping - assumes all RX buffers - * consumed in-order per RX queue + if (n_frags == 1) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. */ - efx_unmap_rx_buffer(efx, rx_buf, len); + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. */ - prefetch(efx_rx_buf_eh(efx, rx_buf)); + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->type->rx_buffer_hash_size; + rx_buf->len -= efx->type->rx_buffer_hash_size; + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE); + } + rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle buffers and pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_recycle_rx_buffers(channel, rx_buf, n_frags); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. */ - rx_buf->len = len - efx->type->rx_buffer_hash_size; -out: - if (channel->rx_pkt) - __efx_rx_packet(channel, channel->rx_pkt); - channel->rx_pkt = rx_buf; + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = n_frags; + channel->rx_pkt_index = index; } -static void efx_rx_deliver(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf) +static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct sk_buff *skb; + u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS); - /* We now own the SKB */ - skb = rx_buf->u.skb; - rx_buf->u.skb = NULL; + skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); + if (unlikely(skb == NULL)) { + efx_free_rx_buffer(rx_buf); + return; + } + skb_record_rx_queue(skb, channel->rx_queue.core_index); /* Set the SKB flags */ skb_checksum_none_assert(skb); - /* Record the rx_queue */ - skb_record_rx_queue(skb, channel->rx_queue.core_index); - - /* Pass the packet up */ if (channel->type->receive_skb) - channel->type->receive_skb(channel, skb); - else - netif_receive_skb(skb); + if (channel->type->receive_skb(channel, skb)) + return; - /* Update allocation strategy method */ - channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; + /* Pass the packet up */ + netif_receive_skb(skb); } /* Handle a received packet. Second half: Touches packet payload. 
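 * The buffer and fragment count are taken from the channel state
 * (rx_pkt_index and rx_pkt_n_frags) recorded by efx_rx_packet() above.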
*/ -void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) +void __efx_rx_packet(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - u8 *eh = efx_rx_buf_eh(efx, rx_buf); + struct efx_rx_buffer *rx_buf = + efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + u8 *eh = efx_rx_buf_va(rx_buf); /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { efx_loopback_rx_packet(efx, eh, rx_buf->len); - efx_free_rx_buffer(efx, rx_buf); - return; - } - - if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) { - struct sk_buff *skb = rx_buf->u.skb; - - prefetch(skb_shinfo(skb)); - - skb_reserve(skb, efx->type->rx_buffer_hash_size); - skb_put(skb, rx_buf->len); - - if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(eh); - - /* Move past the ethernet header. rx_buf->data still points - * at the ethernet header */ - skb->protocol = eth_type_trans(skb, efx->net_dev); - - skb_record_rx_queue(skb, channel->rx_queue.core_index); + efx_free_rx_buffer(rx_buf); + goto out; } if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) && - !channel->type->receive_skb) - efx_rx_packet_gro(channel, rx_buf, eh); + if (!channel->type->receive_skb) + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else - efx_rx_deliver(channel, rx_buf); -} - -void efx_rx_strategy(struct efx_channel *channel) -{ - enum efx_rx_alloc_method method = rx_alloc_method; - - if (channel->type->receive_skb) { - channel->rx_alloc_push_pages = false; - return; - } - - /* Only makes sense to use page based allocation if GRO is enabled */ - if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { - method = RX_ALLOC_METHOD_SKB; - } else if (method == RX_ALLOC_METHOD_AUTO) { - /* Constrain the rx_alloc_level */ - if (channel->rx_alloc_level < 0) - channel->rx_alloc_level = 0; - else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX) - channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; - - /* Decide on the allocation method */ - method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ? 
- RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); - } - - /* Push the option */ - channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE); + efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); +out: + channel->rx_pkt_n_frags = 0; } int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) @@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) kfree(rx_queue->buffer); rx_queue->buffer = NULL; } + return rc; } +static void efx_init_rx_recycle_ring(struct efx_nic *efx, + struct efx_rx_queue *rx_queue) +{ + unsigned int bufs_in_recycle_ring, page_ring_size; + + /* Set the RX recycle ring size */ +#ifdef CONFIG_PPC64 + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; +#else + if (efx->pci_dev->dev.iommu_group) + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; + else + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; +#endif /* CONFIG_PPC64 */ + + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + rx_queue->page_ptr_mask = page_ring_size - 1; +} + void efx_init_rx_queue(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; @@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->notified_count = 0; rx_queue->removed_count = 0; rx_queue->min_fill = -1U; + efx_init_rx_recycle_ring(efx, rx_queue); + + rx_queue->page_remove = 0; + rx_queue->page_add = rx_queue->page_ptr_mask + 1; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; /* Initialise limit fields */ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; - max_trigger = max_fill - EFX_RX_BATCH; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; if (rx_refill_threshold != 0) { trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; if (trigger > max_trigger) @@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) { int i; + struct efx_nic *efx = rx_queue->efx; struct efx_rx_buffer *rx_buf; netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, @@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) del_timer_sync(&rx_queue->slow_fill); efx_nic_fini_rx(rx_queue); - /* Release RX buffers NB start at index 0 not current HW ptr */ + /* Release RX buffers from the current read ptr to the write ptr */ if (rx_queue->buffer) { - for (i = 0; i <= rx_queue->ptr_mask; i++) { - rx_buf = efx_rx_buffer(rx_queue, i); + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned index = i & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); efx_fini_rx_buffer(rx_queue, rx_buf); } } + + /* Unmap and release the pages in the recycle ring. Remove the ring. 
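+ * Every populated ring entry still holds a DMA mapping and a page
+ * reference, so both must be dropped before the ring array is freed.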
+ */
+	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+		struct page *page = rx_queue->page_ring[i];
+		struct efx_rx_page_state *state;
+
+		if (page == NULL)
+			continue;
+
+		state = page_address(page);
+		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+		put_page(page);
+	}
+	kfree(rx_queue->page_ring);
+	rx_queue->page_ring = NULL;
 }

 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)

 }

-module_param(rx_alloc_method, int, 0644);
-MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
-
 module_param(rx_refill_threshold, uint, 0444);
 MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f05..51669244d154 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:

 static enum reset_type siena_map_reset_reason(enum reset_type reason)
 {
-	return RESET_TYPE_ALL;
+	return RESET_TYPE_RECOVER_OR_ALL;
 }

 static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
		return efx_mcdi_reset_port(efx);
 }

+#ifdef CONFIG_EEH
+/* When a PCI device is isolated from the bus, a subsequent MMIO read is
+ * required for the kernel EEH mechanisms to notice. As the Solarflare driver
+ * was written to minimise MMIO reads (for latency), a periodic call to check
+ * the EEH status of the device is required so that device recovery can happen
+ * in a timely fashion.
+ */
+static void siena_monitor(struct efx_nic *efx)
+{
+	struct eeh_dev *eehdev =
+		of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+	eeh_dev_check_failure(eehdev);
+}
+#endif
+
 static int siena_probe_nvconfig(struct efx_nic *efx)
 {
	u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
+			    EFX_RX_USR_BUF_SIZE >> 5);
	efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	/* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
	.init = siena_init_nic,
	.dimension_resources = siena_dimension_resources,
	.fini = efx_port_dummy_op_void,
+#ifdef CONFIG_EEH
+	.monitor = siena_monitor,
+#else
	.monitor = NULL,
+#endif
	.map_reset_reason = siena_map_reset_reason,
	.map_reset_flags = siena_map_reset_flags,
	.reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
+	.can_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21b..4bdbaad9932d 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
 {
	/* Init TX ring */
	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
-					   &priv->tx_ring_dma, GFP_ATOMIC);
+					   &priv->tx_ring_dma,
+					   GFP_ATOMIC | __GFP_ZERO);
	if (!priv->tx_ring)
		return -ENOMEM;
-	memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+
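+	/* The descriptor ring is returned already zeroed by __GFP_ZERO,
+	 * so no explicit memset is needed.
+	 */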
	priv->tx_count = priv->tx_read = priv->tx_write = 0;
	mace->eth.tx_ring_base = priv->tx_ring_dma;
	/* Now init skb save area */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa9..e45829628d5f 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1841,15 +1841,12 @@ refill_rx_ring:
		entry = sis_priv->dirty_rx % NUM_RX_DESC;
		if (sis_priv->rx_skbuff[entry] == NULL) {
-			if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
+			skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
+			if (skb == NULL) {
				/* not enough memory for skbuff, this makes a
				 * "hole" on the buffer ring, it is not clear
				 * how the hardware will react to this kind
				 * of degenerated buffer */
-				if (netif_msg_rx_err(sis_priv))
-					printk(KERN_INFO "%s: Memory squeeze, "
-						"deferring packet.\n",
-						net_dev->name);
				net_dev->stats.rx_dropped++;
				break;
			}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1e..e85c2e7e8246 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
			dev->stats.multicast++;

		skb = netdev_alloc_skb(dev, packet_length + 5);
-
		if ( skb == NULL ) {
-			printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
			dev->stats.rx_dropped++;
			goto done;
		}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de38..dfbf978315df 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
	 */
	skb = netdev_alloc_skb(dev, packet_len);
	if (unlikely(skb == NULL)) {
-		printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
-			dev->name);
		SMC_WAIT_MMU_BUSY(lp);
		SMC_SET_MMU_CMD(lp, MC_RELEASE);
		dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34c..48e2b99bec51 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
	spin_lock_init(&pdata->dev_lock);
	spin_lock_init(&pdata->mac_lock);

-	if (pdata->ioaddr == 0) {
+	if (pdata->ioaddr == NULL) {
		SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
		return -ENODEV;
	}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d7509..ffa5c4ad1210 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
	BUG_ON(pd->rx_buffers[index].skb);
	BUG_ON(pd->rx_buffers[index].mapping);

-	if (unlikely(!skb)) {
-		smsc_warn(RX_ERR, "Failed to allocate new skb!");
+	if (unlikely(!skb))
		return -ENOMEM;
-	}

	mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d1..f695a50bac47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
	select MII
	select PHYLIB
	select CRC32
+	select PTP_1588_CLOCK
	---help---
	  This is the driver for the Ethernet IPs are built around a
	  Synopsys IP Core and only tested on the STMicroelectronics
@@ -54,22 +55,4 @@ config STMMAC_DA
	  By default, the DMA arbitration scheme is based on Round-robin
	  (rx:tx priority is 1:1).
-choice - prompt "Select the DMA TX/RX descriptor operating modes" - depends on STMMAC_ETH - ---help--- - This driver supports DMA descriptor to operate both in dual buffer - (RING) and linked-list(CHAINED) mode. In RING mode each descriptor - points to two data buffer pointers whereas in CHAINED mode they - points to only one data buffer pointer. - -config STMMAC_RING - bool "Enable Descriptor Ring Mode" - -config STMMAC_CHAINED - bool "Enable Descriptor Chained Mode" - -endchoice - - endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index c8e8ea60ac19..356a9dd32be7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -1,9 +1,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o -stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o -stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o -stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ - dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ +stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ + chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ - mmc_core.o $(stmmac-y) + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 0668659803ed..37a3f93b487d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -28,7 +28,7 @@ #include "stmmac.h" -unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *) p; unsigned int txsize = priv->dma_tx_size; @@ -47,7 +47,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum); + priv->tx_skbuff_dma[entry] = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); while (len != 0) { entry = (++priv->cur_tx) % txsize; @@ -57,8 +58,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), bmax, DMA_TO_DEVICE); - priv->hw->desc->prepare_tx_desc(desc, 0, bmax, - csum); + priv->tx_skbuff_dma[entry] = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, + STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; len -= bmax; @@ -67,8 +69,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), len, DMA_TO_DEVICE); - priv->hw->desc->prepare_tx_desc(desc, 0, len, - csum); + priv->tx_skbuff_dma[entry] = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; len = 0; @@ -89,49 +92,70 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) -{ -} - -static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p) -{ -} - -static void stmmac_clean_desc3(struct dma_desc *p) -{ -} - -static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t 
phy_addr, - unsigned int size) +static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, + unsigned int size, unsigned int extend_desc) { /* * In chained mode the des3 points to the next element in the ring. * The latest element has to point to the head. */ int i; - struct dma_desc *p = des; dma_addr_t dma_phy = phy_addr; - for (i = 0; i < (size - 1); i++) { - dma_phy += sizeof(struct dma_desc); - p->des3 = (unsigned int)dma_phy; - p++; + if (extend_desc) { + struct dma_extended_desc *p = (struct dma_extended_desc *) des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_extended_desc); + p->basic.des3 = (unsigned int)dma_phy; + p++; + } + p->basic.des3 = (unsigned int)phy_addr; + + } else { + struct dma_desc *p = (struct dma_desc *) des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_desc); + p->des3 = (unsigned int)dma_phy; + p++; + } + p->des3 = (unsigned int)phy_addr; } - p->des3 = (unsigned int)phy_addr; } -static int stmmac_set_16kib_bfsize(int mtu) +static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + + if (priv->hwts_rx_en && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ + p->des3 = (unsigned int)(priv->dma_rx_phy + + (((priv->dirty_rx) + 1) % + priv->dma_rx_size) * + sizeof(struct dma_desc)); +} + +static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { - /* Not supported */ - return 0; + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + + if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. 
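+ * The value written back is the bus address of the next descriptor in
+ * the ring; e.g. with dma_tx_size == 256 and dirty_tx == 255 the chain
+ * wraps back to dma_tx_phy itself.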
+ */ + p->des3 = (unsigned int)(priv->dma_tx_phy + + (((priv->dirty_tx + 1) % + priv->dma_tx_size) * + sizeof(struct dma_desc))); } -const struct stmmac_ring_mode_ops ring_mode_ops = { +const struct stmmac_chain_mode_ops chain_mode_ops = { + .init = stmmac_init_dma_chain, .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, .refill_desc3 = stmmac_refill_desc3, - .init_desc3 = stmmac_init_desc3, - .init_dma_chain = stmmac_init_dma_chain, .clean_desc3 = stmmac_clean_desc3, - .set_16kib_bfsize = stmmac_set_16kib_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 186d14806122..ad7e20a9875d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -117,6 +117,36 @@ struct stmmac_extra_stats { unsigned long irq_rx_path_in_lpi_mode_n; unsigned long irq_rx_path_exit_lpi_mode_n; unsigned long phy_eee_wakeup_error_n; + /* Extended RDES status */ + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long ip_csum_bypassed; + unsigned long ipv4_pkt_rcvd; + unsigned long ipv6_pkt_rcvd; + unsigned long rx_msg_type_ext_no_ptp; + unsigned long rx_msg_type_sync; + unsigned long rx_msg_type_follow_up; + unsigned long rx_msg_type_delay_req; + unsigned long rx_msg_type_delay_resp; + unsigned long rx_msg_type_pdelay_req; + unsigned long rx_msg_type_pdelay_resp; + unsigned long rx_msg_type_pdelay_follow_up; + unsigned long ptp_frame_type; + unsigned long ptp_ver; + unsigned long timestamp_dropped; + unsigned long av_pkt_rcvd; + unsigned long av_tagged_pkt_rcvd; + unsigned long vlan_tag_priority_val; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + unsigned long l3_l4_filter_no_match; + /* PCS */ + unsigned long irq_pcs_ane_n; + unsigned long irq_pcs_link_n; + unsigned long irq_rgmii_n; + unsigned long pcs_link; + unsigned long pcs_duplex; + unsigned long pcs_speed; }; /* CSR Frequency Access Defines*/ @@ -138,6 +168,12 @@ struct stmmac_extra_stats { #define FLOW_TX 2 #define FLOW_AUTO (FLOW_TX | FLOW_RX) +/* PCS defines */ +#define STMMAC_PCS_RGMII (1 << 0) +#define STMMAC_PCS_SGMII (1 << 1) +#define STMMAC_PCS_TBI (1 << 2) +#define STMMAC_PCS_RTBI (1 << 3) + #define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ /* DAM HW feature register fields */ @@ -194,17 +230,25 @@ enum dma_irq_status { handle_tx = 0x8, }; -enum core_specific_irq_mask { - core_mmc_tx_irq = 1, - core_mmc_rx_irq = 2, - core_mmc_rx_csum_offload_irq = 4, - core_irq_receive_pmt_irq = 8, - core_irq_tx_path_in_lpi_mode = 16, - core_irq_tx_path_exit_lpi_mode = 32, - core_irq_rx_path_in_lpi_mode = 64, - core_irq_rx_path_exit_lpi_mode = 128, +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) + +#define CORE_PCS_ANE_COMPLETE (1 << 5) +#define CORE_PCS_LINK_STATUS (1 << 6) +#define CORE_RGMII_IRQ (1 << 7) + +struct rgmii_adv { + unsigned int pause; + unsigned int duplex; + unsigned int lp_pause; + unsigned int lp_duplex; }; +#define STMMAC_PCS_PAUSE 1 +#define STMMAC_PCS_ASYM_PAUSE 2 + /* DMA HW capabilities */ struct dma_features { unsigned int mbps_10_100; @@ -255,23 +299,26 @@ struct dma_features { #define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 #define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 +#define STMMAC_CHAIN_MODE 0x1 +#define STMMAC_RING_MODE 0x2 + struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ - void 
(*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, - int disable_rx_ic); + void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, + int end); /* DMA TX descriptor ring initialization */ - void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); + void (*init_tx_desc) (struct dma_desc *p, int mode, int end); /* Invoked by the xmit function to prepare the tx descriptor */ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, - int csum_flag); + int csum_flag, int mode); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); /* Invoked by the xmit function to close the tx descriptor */ void (*close_tx_desc) (struct dma_desc *p); /* Clean the tx descriptor as soon as the tx irq is received */ - void (*release_tx_desc) (struct dma_desc *p); + void (*release_tx_desc) (struct dma_desc *p, int mode); /* Clear interrupt on tx frame completion. When this bit is * set an interrupt happens as soon as the frame is transmitted */ void (*clear_tx_ic) (struct dma_desc *p); @@ -290,12 +337,22 @@ struct stmmac_desc_ops { /* Return the reception status looking at the RDES1 */ int (*rx_status) (void *data, struct stmmac_extra_stats *x, struct dma_desc *p); + void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + u64 (*get_timestamp) (void *desc, u32 ats); + /* get rx timestamp status */ + int (*get_rx_timestamp_status) (void *desc, u32 ats); }; struct stmmac_dma_ops { /* DMA core initialization */ int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx); + int burst_len, u32 dma_tx, u32 dma_rx, int atds); /* Dump DMA registers */ void (*dump_regs) (void __iomem *ioaddr); /* Set tx/rx threshold in the csr6 register @@ -327,7 +384,8 @@ struct stmmac_ops { /* Dump MAC registers */ void (*dump_regs) (void __iomem *ioaddr); /* Handle extra events on specific interrupts hw dependent */ - int (*host_irq_status) (void __iomem *ioaddr); + int (*host_irq_status) (void __iomem *ioaddr, + struct stmmac_extra_stats *x); /* Multicast filter setting */ void (*set_filter) (struct net_device *dev, int id); /* Flow control setting */ @@ -344,6 +402,18 @@ struct stmmac_ops { void (*reset_eee_mode) (void __iomem *ioaddr); void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw); void (*set_eee_pls) (void __iomem *ioaddr, int link); + void (*ctrl_ane) (void __iomem *ioaddr, bool restart); + void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv); +}; + +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment) (void __iomem *ioaddr); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend)(void __iomem *ioaddr, u32 addend); + int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub); + u64 (*get_systime)(void __iomem *ioaddr); }; struct mac_link { @@ -360,19 +430,28 @@ struct mii_regs { struct stmmac_ring_mode_ops { unsigned int (*is_jumbo_frm) (int len, int ehn_desc); unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); - void (*refill_desc3) (int bfsize, struct dma_desc *p); - void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p); - void (*init_dma_chain) (struct 
dma_desc *des, dma_addr_t phy_addr, - unsigned int size); - void (*clean_desc3) (struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*init_desc3) (struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); int (*set_16kib_bfsize) (int mtu); }; +struct stmmac_chain_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + struct mac_device_info { const struct stmmac_ops *mac; const struct stmmac_desc_ops *desc; const struct stmmac_dma_ops *dma; const struct stmmac_ring_mode_ops *ring; + const struct stmmac_chain_mode_ops *chain; + const struct stmmac_hwtimestamp *ptp; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; unsigned int synopsys_uid; @@ -390,5 +469,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; +extern const struct stmmac_chain_mode_ops chain_mode_ops; #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 223adf95fd03..2eca0c033038 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -24,6 +24,7 @@ #ifndef __DESCS_H__ #define __DESCS_H__ +/* Basic descriptor structure for normal and alternate descriptors */ struct dma_desc { /* Receive descriptor */ union { @@ -60,7 +61,7 @@ struct dma_desc { } rx; struct { /* RDES0 */ - u32 payload_csum_error:1; + u32 rx_mac_addr:1; u32 crc_error:1; u32 dribbling:1; u32 error_gmii:1; @@ -162,13 +163,57 @@ struct dma_desc { unsigned int des3; }; +/* Extended descriptor structure (supported by new SYNP GMAC generations) */ +struct dma_extended_desc { + struct dma_desc basic; + union { + struct { + u32 ip_payload_type:3; + u32 ip_hdr_err:1; + u32 ip_payload_err:1; + u32 ip_csum_bypassed:1; + u32 ipv4_pkt_rcvd:1; + u32 ipv6_pkt_rcvd:1; + u32 msg_type:4; + u32 ptp_frame_type:1; + u32 ptp_ver:1; + u32 timestamp_dropped:1; + u32 reserved:1; + u32 av_pkt_rcvd:1; + u32 av_tagged_pkt_rcvd:1; + u32 vlan_tag_priority_val:3; + u32 reserved3:3; + u32 l3_filter_match:1; + u32 l4_filter_match:1; + u32 l3_l4_filter_no_match:2; + u32 reserved4:4; + } erx; + struct { + u32 reserved; + } etx; + } des4; + unsigned int des5; /* Reserved */ + unsigned int des6; /* Tx/Rx Timestamp Low */ + unsigned int des7; /* Tx/Rx Timestamp High */ +}; + /* Transmit checksum insertion control */ enum tdes_csum_insertion { cic_disabled = 0, /* Checksum Insertion Control */ cic_only_ip = 1, /* Only IP header */ - cic_no_pseudoheader = 2, /* IP header but pseudoheader - * is not calculated */ + /* IP header but pseudoheader is not calculated */ + cic_no_pseudoheader = 2, cic_full = 3, /* IP header and pseudoheader */ }; +/* Extended RDES4 definitions */ +#define RDES_EXT_NO_PTP 0 +#define RDES_EXT_SYNC 0x1 +#define RDES_EXT_FOLLOW_UP 0x2 +#define RDES_EXT_DELAY_REQ 0x3 +#define RDES_EXT_DELAY_RESP 0x4 +#define RDES_EXT_PDELAY_REQ 0x5 +#define RDES_EXT_PDELAY_RESP 0x6 +#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 + #endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 
7ee9499a6e38..20f83fc9cf13 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -30,26 +30,28 @@ #ifndef __DESC_COM_H__ #define __DESC_COM_H__ -#if defined(CONFIG_STMMAC_RING) -static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) +/* Specific functions used for Ring mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) { p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; if (end) p->des01.erx.end_ring = 1; } -static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) +static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end) { if (end) p->des01.etx.end_ring = 1; } -static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter) { p->des01.etx.end_ring = ter; } -static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) +static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_4KiB)) { p->des01.etx.buffer1_size = BUF_SIZE_4KiB; @@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) p->des01.etx.buffer1_size = len; } -static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) +/* Normal descriptors */ +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) { p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; if (end) p->des01.rx.end_ring = 1; } -static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end) +static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end) { if (end) p->des01.tx.end_ring = 1; } -static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter) +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter) { p->des01.tx.end_ring = ter; } -static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) +static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_2KiB)) { p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; @@ -85,47 +88,48 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) p->des01.tx.buffer1_size = len; } -#else +/* Specific functions used for Chain mode */ -static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end) { p->des01.erx.second_address_chained = 1; } -static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) +static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end) { p->des01.etx.second_address_chained = 1; } -static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter) { p->des01.etx.second_address_chained = 1; } -static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) +static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { p->des01.etx.buffer1_size = len; } -static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) +/* Normal descriptors */ +static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) { p->des01.rx.second_address_chained = 1; } -static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size) +static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int + ring_size) { p->des01.tx.second_address_chained = 1; } -static inline void 
ndesc_end_tx_desc(struct dma_desc *p, int ter) +static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter) { p->des01.tx.second_address_chained = 1; } -static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) +static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { p->des01.tx.buffer1_size = len; } -#endif - #endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 7ad56afd6324..57f4e8f607e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -89,13 +89,46 @@ enum power_event { (reg * 8)) #define GMAC_MAX_PERFECT_ADDRESSES 32 +/* PCS registers (AN/TBI/SGMII/RGMII) offset */ #define GMAC_AN_CTRL 0x000000c0 /* AN control */ #define GMAC_AN_STATUS 0x000000c4 /* AN status */ #define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ -#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */ +#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */ #define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ #define GMAC_TBI 0x000000d4 /* TBI extend status */ -#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ +#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */ + +/* Register 54 (SGMII/RGMII status register) */ +#define GMAC_S_R_GMII_LINK 0x8 +#define GMAC_S_R_GMII_SPEED 0x5 +#define GMAC_S_R_GMII_SPEED_SHIFT 0x1 +#define GMAC_S_R_GMII_MODE 0x1 +#define GMAC_S_R_GMII_SPEED_125 2 +#define GMAC_S_R_GMII_SPEED_25 1 + +/* Common ADV and LPA defines */ +#define GMAC_ANE_FD (1 << 5) +#define GMAC_ANE_HD (1 << 6) +#define GMAC_ANE_PSE (3 << 7) +#define GMAC_ANE_PSE_SHIFT 7 + + /* GMAC Configuration defines */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ /* GMAC Configuration defines */ #define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. 
in RGMII/SGMII */ @@ -155,6 +188,7 @@ enum inter_frame_gap { /* Programmable burst length (passed thorugh platform)*/ #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ #define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ enum rx_tx_priority_ratio { double_ratio = 0x00004000, /*2:1 */ @@ -230,5 +264,7 @@ enum rtc_control { #define GMAC_MMC_TX_INTR 0x108 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 + + extern const struct stmmac_dma_ops dwmac1000_dma_ops; #endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index bfe022605498..29138da19db0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -28,6 +28,7 @@ #include <linux/crc32.h> #include <linux/slab.h> +#include <linux/ethtool.h> #include <asm/io.h> #include "dwmac1000.h" @@ -193,59 +194,91 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode) writel(pmt, ioaddr + GMAC_PMT); } - -static int dwmac1000_irq_status(void __iomem *ioaddr) +static int dwmac1000_irq_status(void __iomem *ioaddr, + struct stmmac_extra_stats *x) { u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); - int status = 0; + int ret = 0; /* Not used events (e.g. MMC interrupts) are not handled. */ if ((intr_status & mmc_tx_irq)) { CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_TX_INTR)); - status |= core_mmc_tx_irq; + x->mmc_tx_irq_n++; } if (unlikely(intr_status & mmc_rx_irq)) { CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_INTR)); - status |= core_mmc_rx_irq; + x->mmc_rx_irq_n++; } if (unlikely(intr_status & mmc_rx_csum_offload_irq)) { CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n", readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); - status |= core_mmc_rx_csum_offload_irq; + x->mmc_rx_csum_offload_irq_n++; } if (unlikely(intr_status & pmt_irq)) { CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT * status register. 
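 * The register is clear-on-read, so the value returned by readl() can
 * simply be discarded.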
*/ readl(ioaddr + GMAC_PMT); - status |= core_irq_receive_pmt_irq; + x->irq_receive_pmt_irq_n++; } /* MAC trx/rx EEE LPI entry/exit interrupts */ if (intr_status & lpiis_irq) { /* Clean LPI interrupt by reading the Reg 12 */ - u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS); + ret = readl(ioaddr + LPI_CTRL_STATUS); - if (lpi_status & LPI_CTRL_STATUS_TLPIEN) { + if (ret & LPI_CTRL_STATUS_TLPIEN) { CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n"); - status |= core_irq_tx_path_in_lpi_mode; + x->irq_tx_path_in_lpi_mode_n++; } - if (lpi_status & LPI_CTRL_STATUS_TLPIEX) { + if (ret & LPI_CTRL_STATUS_TLPIEX) { CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n"); - status |= core_irq_tx_path_exit_lpi_mode; + x->irq_tx_path_exit_lpi_mode_n++; } - if (lpi_status & LPI_CTRL_STATUS_RLPIEN) { + if (ret & LPI_CTRL_STATUS_RLPIEN) { CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n"); - status |= core_irq_rx_path_in_lpi_mode; + x->irq_rx_path_in_lpi_mode_n++; } - if (lpi_status & LPI_CTRL_STATUS_RLPIEX) { + if (ret & LPI_CTRL_STATUS_RLPIEX) { CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n"); - status |= core_irq_rx_path_exit_lpi_mode; + x->irq_rx_path_exit_lpi_mode_n++; + } + } + + if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { + CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n"); + readl(ioaddr + GMAC_AN_STATUS); + x->irq_pcs_ane_n++; + } + if (intr_status & rgmii_irq) { + u32 status = readl(ioaddr + GMAC_S_R_GMII); + CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n"); + x->irq_rgmii_n++; + + /* Save and dump the link status. */ + if (status & GMAC_S_R_GMII_LINK) { + int speed_value = (status & GMAC_S_R_GMII_SPEED) >> + GMAC_S_R_GMII_SPEED_SHIFT; + x->pcs_duplex = (status & GMAC_S_R_GMII_MODE); + + if (speed_value == GMAC_S_R_GMII_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_S_R_GMII_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_link = 1; + pr_debug("Link is Up - %d/%s\n", (int) x->pcs_speed, + x->pcs_duplex ? 
"Full" : "Half"); + } else { + x->pcs_link = 0; + pr_debug("Link is Down\n"); } } - return status; + return ret; } static void dwmac1000_set_eee_mode(void __iomem *ioaddr) @@ -297,6 +330,41 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw) writel(value, ioaddr + LPI_TIMER_CTRL); } +static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) +{ + u32 value; + + value = readl(ioaddr + GMAC_AN_CTRL); + /* auto negotiation enable and External Loopback enable */ + value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL); +} + +static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + u32 value = readl(ioaddr + GMAC_ANE_ADV); + + if (value & GMAC_ANE_FD) + adv->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv->duplex |= DUPLEX_HALF; + + adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA); + + if (value & GMAC_ANE_FD) + adv->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv->lp_duplex = DUPLEX_HALF; + + adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} + static const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .rx_ipc = dwmac1000_rx_ipc_enable, @@ -311,6 +379,8 @@ static const struct stmmac_ops dwmac1000_ops = { .reset_eee_mode = dwmac1000_reset_eee_mode, .set_eee_timer = dwmac1000_set_eee_timer, .set_eee_pls = dwmac1000_set_eee_pls, + .ctrl_ane = dwmac1000_ctrl_ane, + .get_adv = dwmac1000_get_adv, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index bf83c03bfd06..f1c4b2c00aa5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -30,8 +30,8 @@ #include "dwmac1000.h" #include "dwmac_dma.h" -static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, - int mb, int burst_len, u32 dma_tx, u32 dma_rx) +static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds) { u32 value = readl(ioaddr + DMA_BUS_MODE); int limit; @@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, #ifdef CONFIG_STMMAC_DA value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ #endif + + if (atds) + value |= DMA_BUS_MODE_ATDS; + writel(value, ioaddr + DMA_BUS_MODE); /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index f83210e7c221..cb86a58c1c5f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -72,7 +72,8 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr) return 0; } -static int dwmac100_irq_status(void __iomem *ioaddr) +static int dwmac100_irq_status(void __iomem *ioaddr, + struct stmmac_extra_stats *x) { return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index c2b4d55a79b6..e979a8b2ae42 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -32,8 +32,8 @@ #include "dwmac100.h" #include "dwmac_dma.h" -static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, - int mb, int burst_len, u32 dma_tx, u32 dma_rx) +static 
int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			     int burst_len, u32 dma_tx, u32 dma_rx, int atds)
 {
	u32 value = readl(ioaddr + DMA_BUS_MODE);
	int limit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97a..0fbc8fafa706 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
	return ret;
 }

+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+				    struct dma_extended_desc *p)
+{
+	if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
+		if (p->des4.erx.ip_hdr_err)
+			x->ip_hdr_err++;
+		if (p->des4.erx.ip_payload_err)
+			x->ip_payload_err++;
+		if (p->des4.erx.ip_csum_bypassed)
+			x->ip_csum_bypassed++;
+		if (p->des4.erx.ipv4_pkt_rcvd)
+			x->ipv4_pkt_rcvd++;
+		if (p->des4.erx.ipv6_pkt_rcvd)
+			x->ipv6_pkt_rcvd++;
+		if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+			x->rx_msg_type_sync++;
+		else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+			x->rx_msg_type_follow_up++;
+		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+			x->rx_msg_type_delay_req++;
+		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+			x->rx_msg_type_delay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+			x->rx_msg_type_pdelay_req++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+			x->rx_msg_type_pdelay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+			x->rx_msg_type_pdelay_follow_up++;
+		else
+			x->rx_msg_type_ext_no_ptp++;
+		if (p->des4.erx.ptp_frame_type)
+			x->ptp_frame_type++;
+		if (p->des4.erx.ptp_ver)
+			x->ptp_ver++;
+		if (p->des4.erx.timestamp_dropped)
+			x->timestamp_dropped++;
+		if (p->des4.erx.av_pkt_rcvd)
+			x->av_pkt_rcvd++;
+		if (p->des4.erx.av_tagged_pkt_rcvd)
+			x->av_tagged_pkt_rcvd++;
+		if (p->des4.erx.vlan_tag_priority_val)
+			x->vlan_tag_priority_val++;
+		if (p->des4.erx.l3_filter_match)
+			x->l3_filter_match++;
+		if (p->des4.erx.l4_filter_match)
+			x->l4_filter_match++;
+		if (p->des4.erx.l3_l4_filter_no_match)
+			x->l3_l4_filter_no_match++;
+	}
+}
+
 static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				   struct dma_desc *p)
 {
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers.
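	 * enh_desc_coe_rdes0() folds the ipc_csum_error, frame_type and
	 * payload error bits of RDES0 into the csum result returned to
	 * the caller.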
*/ ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, - p->des01.erx.frame_type, p->des01.erx.payload_csum_error); + p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); if (unlikely(p->des01.erx.dribbling)) { CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); @@ -225,34 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, x->rx_vlan++; } #endif + return ret; } -static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, - int disable_rx_ic) +static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) { - int i; - for (i = 0; i < ring_size; i++) { - p->des01.erx.own = 1; - p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; - ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1)); + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p, end); + else + ehn_desc_rx_set_on_ring(p, end); - if (disable_rx_ic) - p->des01.erx.disable_ic = 1; - p++; - } + if (disable_rx_ic) + p->des01.erx.disable_ic = 1; } -static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) { - int i; - - for (i = 0; i < ring_size; i++) { - p->des01.etx.own = 0; - ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1)); - p++; - } + p->des01.etx.own = 0; + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_tx_set_on_chain(p, end); + else + ehn_desc_tx_set_on_ring(p, end); } static int enh_desc_get_tx_owner(struct dma_desc *p) @@ -280,20 +329,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p) return p->des01.etx.last_segment; } -static void enh_desc_release_tx_desc(struct dma_desc *p) +static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) { int ter = p->des01.etx.end_ring; memset(p, 0, offsetof(struct dma_desc, des2)); - enh_desc_end_tx_desc(p, ter); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p, ter); + else + enh_desc_end_tx_desc_on_ring(p, ter); } static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag) + int csum_flag, int mode) { p->des01.etx.first_segment = is_fs; - enh_set_tx_desc_len(p, len); + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); + else + enh_set_tx_desc_len_on_ring(p, len); if (likely(csum_flag)) p->des01.etx.checksum_insertion = cic_full; @@ -323,6 +378,49 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) return p->des01.erx.frame_length; } +static void enh_desc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des01.etx.time_stamp_enable = 1; +} + +static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) +{ + return p->des01.etx.time_stamp_status; +} + +static u64 enh_desc_get_timestamp(void *desc, u32 ats) +{ + u64 ns; + + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + ns = p->des6; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des7 * 1000000000ULL; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + ns = p->des2; + ns += p->des3 * 1000000000ULL; + } + + return ns; +} + +static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) +{ + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + return p->basic.des01.erx.ipc_csum_error; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; + } +} + const 
struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -339,4 +437,9 @@ const struct stmmac_desc_ops enh_desc_ops = { .set_tx_owner = enh_desc_set_tx_owner, .set_rx_owner = enh_desc_set_rx_owner, .get_rx_frame_len = enh_desc_get_rx_frame_len, + .rx_extended_status = enh_desc_get_ext_status, + .enable_tx_timestamp = enh_desc_enable_tx_timestamp, + .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, + .get_timestamp = enh_desc_get_timestamp, + .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 68962c549a2d..7cbcea348c3d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -122,30 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, return ret; } -static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, - int disable_rx_ic) +static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end) { - int i; - for (i = 0; i < ring_size; i++) { - p->des01.rx.own = 1; - p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; - ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1)); + if (mode == STMMAC_CHAIN_MODE) + ndesc_rx_set_on_chain(p, end); + else + ndesc_rx_set_on_ring(p, end); - if (disable_rx_ic) - p->des01.rx.disable_ic = 1; - p++; - } + if (disable_rx_ic) + p->des01.rx.disable_ic = 1; } -static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) { - int i; - for (i = 0; i < ring_size; i++) { - p->des01.tx.own = 0; - ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1))); - p++; - } + p->des01.tx.own = 0; + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p, end); + else + ndesc_tx_set_on_ring(p, end); } static int ndesc_get_tx_owner(struct dma_desc *p) @@ -173,19 +171,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p) return p->des01.tx.last_segment; } -static void ndesc_release_tx_desc(struct dma_desc *p) +static void ndesc_release_tx_desc(struct dma_desc *p, int mode) { int ter = p->des01.tx.end_ring; memset(p, 0, offsetof(struct dma_desc, des2)); - ndesc_end_tx_desc(p, ter); + if (mode == STMMAC_CHAIN_MODE) + ndesc_end_tx_desc_on_chain(p, ter); + else + ndesc_end_tx_desc_on_ring(p, ter); } static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag) + int csum_flag, int mode) { p->des01.tx.first_segment = is_fs; - norm_set_tx_desc_len(p, len); + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); + else + norm_set_tx_desc_len_on_ring(p, len); if (likely(csum_flag)) p->des01.tx.checksum_insertion = cic_full; @@ -215,6 +219,39 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) return p->des01.rx.frame_length; } +static void ndesc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des01.tx.time_stamp_enable = 1; +} + +static int ndesc_get_tx_timestamp_status(struct dma_desc *p) +{ + return p->des01.tx.time_stamp_status; +} + +static u64 ndesc_get_timestamp(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = p->des2; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des3 * 1000000000ULL; + + return ns; +} + +static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) +{ + struct 
dma_desc *p = (struct dma_desc *)desc; + + if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -231,4 +268,8 @@ const struct stmmac_desc_ops ndesc_ops = { .set_tx_owner = ndesc_set_tx_owner, .set_rx_owner = ndesc_set_rx_owner, .get_rx_frame_len = ndesc_get_rx_frame_len, + .enable_tx_timestamp = ndesc_enable_tx_timestamp, + .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, + .get_timestamp = ndesc_get_timestamp, + .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 4b785e10f2ed..d0265a7d5a54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -48,25 +48,30 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry] = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, - csum); + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, + STMMAC_RING_MODE); wmb(); entry = (++priv->cur_tx) % txsize; desc = priv->dma_tx + entry; desc->des2 = dma_map_single(priv->device, skb->data + bmax, len, DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry] = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_RING_MODE); wmb(); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry] = desc->des2; desc->des3 = desc->des2 + BUF_SIZE_4KiB; - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum); + priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, + STMMAC_RING_MODE); } return entry; @@ -82,27 +87,23 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) +static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) { - /* Fill DES3 in case of RING mode */ - if (bfsize >= BUF_SIZE_8KiB) - p->des3 = p->des2 + BUF_SIZE_8KiB; -} + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; -/* In ring mode we need to fill the desc3 because it is used - * as buffer */ -static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p) -{ - if (unlikely(des3_as_data_buf)) - p->des3 = p->des2 + BUF_SIZE_8KiB; + if (unlikely(priv->plat->has_gmac)) + /* Fill DES3 in case of RING mode */ + if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + p->des3 = p->des2 + BUF_SIZE_8KiB; } -static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr, - unsigned int size) +/* In ring mode we need to fill the desc3 because it is used as buffer */ +static void stmmac_init_desc3(struct dma_desc *p) { + p->des3 = p->des2 + BUF_SIZE_8KiB; } -static void stmmac_clean_desc3(struct dma_desc *p) +static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { if (unlikely(p->des3)) p->des3 = 0; @@ -121,7 +122,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = { .jumbo_frm = stmmac_jumbo_frm, .refill_desc3 = stmmac_refill_desc3, .init_desc3 = stmmac_init_desc3, - .init_dma_chain = stmmac_init_dma_chain, 
.clean_desc3 = stmmac_clean_desc3, .set_16kib_bfsize = stmmac_set_16kib_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b05df8983be5..75f997b467aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,25 +24,29 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Nov_2012" +#define DRV_MODULE_VERSION "March_2013" #include <linux/clk.h> #include <linux/stmmac.h> #include <linux/phy.h> #include <linux/pci.h> #include "common.h" +#include <linux/ptp_clock_kernel.h> struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ - struct dma_desc *dma_tx ____cacheline_aligned; + struct dma_desc *dma_tx ____cacheline_aligned; /* Basic TX desc */ + struct dma_extended_desc *dma_etx; /* Extended TX descriptor */ dma_addr_t dma_tx_phy; struct sk_buff **tx_skbuff; + dma_addr_t *tx_skbuff_dma; unsigned int cur_tx; unsigned int dirty_tx; unsigned int dma_tx_size; int tx_coalesce; - struct dma_desc *dma_rx ; + struct dma_desc *dma_rx; /* Basic RX descriptor */ + struct dma_extended_desc *dma_erx; /* Extended RX descriptor */ unsigned int cur_rx; unsigned int dirty_rx; struct sk_buff **rx_skbuff; @@ -93,6 +97,16 @@ struct stmmac_priv { u32 tx_coal_timer; int use_riwt; u32 rx_riwt; + unsigned int mode; + int extend_desc; + int pcs; + int hwts_tx_en; + int hwts_rx_en; + unsigned int default_addend; + u32 adv_ts; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + spinlock_t ptp_lock; }; extern int phyaddr; @@ -102,6 +116,9 @@ extern int stmmac_mdio_register(struct net_device *ndev); extern void stmmac_set_ethtool_ops(struct net_device *netdev); extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; +extern const struct stmmac_hwtimestamp stmmac_ptp; +extern int stmmac_ptp_register(struct stmmac_priv *priv); +extern void stmmac_ptp_unregister(struct stmmac_priv *priv); int stmmac_freeze(struct net_device *ndev); int stmmac_restore(struct net_device *ndev); int stmmac_resume(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1ac39c1b05d..c5f9cb85c8ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/net_tstamp.h> #include <asm/io.h> #include "stmmac.h" @@ -108,6 +109,33 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(irq_rx_path_in_lpi_mode_n), STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), STMMAC_STAT(phy_eee_wakeup_error_n), + /* Extended RDES status */ + STMMAC_STAT(ip_hdr_err), + STMMAC_STAT(ip_payload_err), + STMMAC_STAT(ip_csum_bypassed), + STMMAC_STAT(ipv4_pkt_rcvd), + STMMAC_STAT(ipv6_pkt_rcvd), + STMMAC_STAT(rx_msg_type_ext_no_ptp), + STMMAC_STAT(rx_msg_type_sync), + STMMAC_STAT(rx_msg_type_follow_up), + STMMAC_STAT(rx_msg_type_delay_req), + STMMAC_STAT(rx_msg_type_delay_resp), + STMMAC_STAT(rx_msg_type_pdelay_req), + STMMAC_STAT(rx_msg_type_pdelay_resp), + STMMAC_STAT(rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_frame_type), + STMMAC_STAT(ptp_ver), + STMMAC_STAT(timestamp_dropped), + STMMAC_STAT(av_pkt_rcvd), + STMMAC_STAT(av_tagged_pkt_rcvd), + STMMAC_STAT(vlan_tag_priority_val), + STMMAC_STAT(l3_filter_match), + 
STMMAC_STAT(l4_filter_match), + STMMAC_STAT(l3_l4_filter_no_match), + /* PCS */ + STMMAC_STAT(irq_pcs_ane_n), + STMMAC_STAT(irq_pcs_link_n), + STMMAC_STAT(irq_rgmii_n), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -219,6 +247,70 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phy = priv->phydev; int rc; + + if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + struct rgmii_adv adv; + + if (!priv->xstats.pcs_link) { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + return 0; + } + cmd->duplex = priv->xstats.pcs_duplex; + + ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); + + /* Get and convert ADV/LP_ADV from the HW AN registers */ + if (priv->hw->mac->get_adv) + priv->hw->mac->get_adv(priv->ioaddr, &adv); + else + return -EOPNOTSUPP; /* should never happen indeed */ + + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ + + if (adv.pause & STMMAC_PCS_PAUSE) + cmd->advertising |= ADVERTISED_Pause; + if (adv.pause & STMMAC_PCS_ASYM_PAUSE) + cmd->advertising |= ADVERTISED_Asym_Pause; + if (adv.lp_pause & STMMAC_PCS_PAUSE) + cmd->lp_advertising |= ADVERTISED_Pause; + if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) + cmd->lp_advertising |= ADVERTISED_Asym_Pause; + + /* Reg49[3] always set because ANE is always supported */ + cmd->autoneg = ADVERTISED_Autoneg; + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + cmd->lp_advertising |= ADVERTISED_Autoneg; + + if (adv.duplex) { + cmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full); + cmd->advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + } else { + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + cmd->advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + } + if (adv.lp_duplex) + cmd->lp_advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + else + cmd->lp_advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + cmd->port = PORT_OTHER; + + return 0; + } + if (phy == NULL) { pr_err("%s: %s: PHY is not registered\n", __func__, dev->name); @@ -243,6 +335,30 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, struct phy_device *phy = priv->phydev; int rc; + if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; + + /* Only support ANE */ + if (cmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + spin_lock(&priv->lock); + if (priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->ioaddr, 1); + spin_unlock(&priv->lock); + } + + return 0; + } + spin_lock(&priv->lock); rc = phy_ethtool_sset(phy, cmd); spin_unlock(&priv->lock); @@ -312,6 +428,9 @@ stmmac_get_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); + if (priv->pcs) /* FIXME */ + return; + spin_lock(&priv->lock); pause->rx_pause = 0; @@ -335,6 +454,9 @@ stmmac_set_pauseparam(struct net_device *netdev, int new_pause = FLOW_OFF; int ret = 0; + if (priv->pcs) /* FIXME */ + return 
-EOPNOTSUPP; + spin_lock(&priv->lock); if (pause->rx_pause) @@ -604,6 +726,38 @@ static int stmmac_set_coalesce(struct net_device *dev, return 0; } +static int stmmac_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + return 0; + } else + return ethtool_op_get_ts_info(dev, info); +} + static const struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, @@ -623,7 +777,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_eee = stmmac_ethtool_op_get_eee, .set_eee = stmmac_ethtool_op_set_eee, .get_sset_count = stmmac_get_sset_count, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c new file mode 100644 index 000000000000..def7e75e1d57 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -0,0 +1,148 @@ +/******************************************************************************* + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This implements all the API for managing HW timestamp & PTP. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/io.h> +#include <linux/delay.h> +#include "common.h" +#include "stmmac_ptp.h" + +static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void stmmac_config_sub_second_increment(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + + /* Convert the ptp_clock to nano second + * formula = (1/ptp_clock) * 1000000000 + * where, ptp_clock = 50MHz. 
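 * With that 50MHz reference the increment below works out to * 1000000000 / 50000000 = 20, i.e. 20ns per tick, before the * (data * 100) / 465 rescale applied when PTP_TCR_TSCTRLSSR is set * (0.465ns units).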
+ */ + data = (1000000000ULL / 50000000); + + /* 0.465ns accuracy */ + if (value & PTP_TCR_TSCTRLSSR) + data = (data * 100) / 465; + + writel(data, ioaddr + PTP_SSIR); +} + +static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + int limit; + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub) +{ + u32 value; + int limit; + + writel(sec, ioaddr + PTP_STSUR); + writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), + ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static u64 stmmac_get_systime(void __iomem *ioaddr) +{ + u64 ns; + + ns = readl(ioaddr + PTP_STNSR); + /* convert sec time value to nanosecond */ + ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + + return ns; +} + +const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = stmmac_config_hw_tstamping, + .init_systime = stmmac_init_systime, + .config_sub_second_increment = stmmac_config_sub_second_increment, + .config_addend = stmmac_config_addend, + .adjust_systime = stmmac_adjust_systime, + .get_systime = stmmac_get_systime, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 39c6c5524633..6b26d31c268f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -47,6 +47,8 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #endif +#include <linux/net_tstamp.h> +#include "stmmac_ptp.h" #include "stmmac.h" #undef STMMAC_DEBUG @@ -130,6 +132,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); #define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) +/* By default the driver will use the ring mode to manage tx and rx descriptors + * but passing this value so user can force to use the chain instead of the ring + */ +static unsigned int chain_mode; +module_param(chain_mode, int, S_IRUGO); +MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); + static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_STMMAC_DEBUG_FS @@ -304,6 +313,339 @@ static void stmmac_eee_adjust(struct stmmac_priv *priv) 
priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); } +/* stmmac_get_tx_hwtstamp: + * @priv : pointer to private device structure. + * @entry : descriptor index to be used. + * @skb : the socket buffer + * Description : + * This function will read the timestamp from the descriptor, perform some + * sanity checks and then pass it to the stack. + */ +static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, + unsigned int entry, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamp; + u64 ns; + void *desc = NULL; + + if (!priv->hwts_tx_en) + return; + + /* if skb doesn't support hw tstamp */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) + return; + + if (priv->adv_ts) + desc = (priv->dma_etx + entry); + else + desc = (priv->dma_tx + entry); + + /* check tx tstamp status */ + if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) + return; + + /* get the valid tstamp */ + ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + + return; +} + +/* stmmac_get_rx_hwtstamp: + * @priv : pointer to private device structure. + * @entry : descriptor index to be used. + * @skb : the socket buffer + * Description : + * This function will read received packet's timestamp from the descriptor + * and pass it to the stack. It also performs some sanity checks. + */ +static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, + unsigned int entry, + struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamp = NULL; + u64 ns; + void *desc = NULL; + + if (!priv->hwts_rx_en) + return; + + if (priv->adv_ts) + desc = (priv->dma_erx + entry); + else + desc = (priv->dma_rx + entry); + + /* if rx tstamp is not valid */ + if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) + return; + + /* get valid tstamp */ + ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * stmmac_hwtstamp_ioctl - control hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function configures the MAC to enable/disable both outgoing (TX) + * and incoming (RX) packet time stamping based on user input. + * Return Value: + * 0 on success and an appropriate -ve integer on failure.
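 * It is reached through the SIOCSHWTSTAMP case of stmmac_ioctl(), e.g. when * a PTP daemon such as ptp4l requests hardware timestamping.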
+ */ +static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + struct timespec now; + u64 temp = 0; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + u32 value = 0; + + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { + netdev_alert(priv->dev, "No support for HW time stamping\n"); + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + priv->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + if (priv->adv_ts) { + switch (config.rx_filter) { + /* time stamp no incoming packet at all */ + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + /* PTP v1, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v1, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v1, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v2, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v2, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v2, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + /* PTP v2/802.AS1, any layer, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event 
messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + /* PTP v2/802.AS1, any layer, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + /* PTP v2/802.AS1, any layer, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + /* time stamp any incoming packet */ + case HWTSTAMP_FILTER_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + } else { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + } + } + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + + if (!priv->hwts_tx_en && !priv->hwts_rx_en) + priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); + else { + value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | + tstamp_all | ptp_v2 | ptp_over_ethernet | + ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel); + + priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); + + /* program Sub Second Increment reg */ + priv->hw->ptp->config_sub_second_increment(priv->ioaddr); + + /* calculate the default addend value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz + * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; + * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to + * achieve 20ns accuracy. + * + * 2^x * y == (y << x), hence + * 2^32 * 50000000 ==> (50000000 << 32) + */ + temp = (u64)(50000000ULL << 32); + priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); + priv->hw->ptp->config_addend(priv->ioaddr, + priv->default_addend); + + /* initialize system time */ + getnstimeofday(&now); + priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec, + now.tv_nsec); + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config)) ?
-EFAULT : 0; +} + +static int stmmac_init_ptp(struct stmmac_priv *priv) +{ + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + priv->adv_ts = 0; + if (priv->dma_cap.atime_stamp && priv->extend_desc) + priv->adv_ts = 1; + + if (netif_msg_hw(priv)) { + if (priv->dma_cap.time_stamp) + pr_debug("IEEE 1588-2002 Time Stamp supported\n"); + if (priv->adv_ts) + pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); + } + + priv->hw->ptp = &stmmac_ptp; + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return stmmac_ptp_register(priv); +} + +static void stmmac_release_ptp(struct stmmac_priv *priv) +{ + stmmac_ptp_unregister(priv); +} + /** * stmmac_adjust_link * @dev: net device structure @@ -398,6 +740,24 @@ static void stmmac_adjust_link(struct net_device *dev) DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); } +static void stmmac_check_pcs_mode(struct stmmac_priv *priv) +{ + int interface = priv->plat->interface; + + if (priv->dma_cap.pcs) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + pr_debug("STMMAC: PCS RGMII support enabled\n"); + priv->pcs = STMMAC_PCS_RGMII; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { + pr_debug("STMMAC: PCS SGMII support enabled\n"); + priv->pcs = STMMAC_PCS_SGMII; + } + } +} + /** * stmmac_init_phy - PHY initialization * @dev: net device structure @@ -461,29 +821,56 @@ static int stmmac_init_phy(struct net_device *dev) } /** - * display_ring + * stmmac_display_ring * @p: pointer to the ring. * @size: size of the ring. - * Description: display all the descriptors within the ring. + * Description: display the control/status and buffer descriptors.
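 * @extend_desc: non-zero when the ring holds dma_extended_desc entries; the * dump then walks the ring with the larger stride but still prints the basic * DES0..DES3 words of each entry.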
*/ -static void display_ring(struct dma_desc *p, int size) +static void stmmac_display_ring(void *head, int size, int extend_desc) { - struct tmp_s { - u64 a; - unsigned int b; - unsigned int c; - }; int i; + struct dma_extended_desc *ep = (struct dma_extended_desc *) head; + struct dma_desc *p = (struct dma_desc *) head; + for (i = 0; i < size; i++) { - struct tmp_s *x = (struct tmp_s *)(p + i); - pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", - i, (unsigned int)virt_to_phys(&p[i]), - (unsigned int)(x->a), (unsigned int)((x->a) >> 32), - x->b, x->c); + u64 x; + if (extend_desc) { + x = *(u64 *) ep; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int) virt_to_phys(ep), + (unsigned int) x, (unsigned int) (x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } else { + x = *(u64 *) p; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + i, (unsigned int) virt_to_phys(p), + (unsigned int) x, (unsigned int) (x >> 32), + p->des2, p->des3); + p++; + } pr_info("\n"); } } +static void stmmac_display_rings(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + + if (priv->extend_desc) { + pr_info("Extended RX descriptor ring:\n"); + stmmac_display_ring((void *) priv->dma_erx, rxsize, 1); + pr_info("Extended TX descriptor ring:\n"); + stmmac_display_ring((void *) priv->dma_etx, txsize, 1); + } else { + pr_info("RX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + pr_info("TX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + } +} + static int stmmac_set_bfsize(int mtu, int bufsize) { int ret = bufsize; @@ -500,6 +887,59 @@ static int stmmac_set_bfsize(int mtu, int bufsize) return ret; } +static void stmmac_clear_descriptors(struct stmmac_priv *priv) +{ + int i; + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + + /* Clear the Rx/Tx descriptors */ + for (i = 0; i < rxsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == rxsize - 1)); + else + priv->hw->desc->init_rx_desc(&priv->dma_rx[i], + priv->use_riwt, priv->mode, + (i == rxsize - 1)); + for (i = 0; i < txsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->mode, + (i == txsize - 1)); + else + priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->mode, + (i == txsize - 1)); +} + +static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, + int i) +{ + struct sk_buff *skb; + + skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, + GFP_KERNEL); + if (unlikely(skb == NULL)) { + pr_err("%s: Rx init fails; skb is NULL\n", __func__); + return 1; + } + skb_reserve(skb, NET_IP_ALIGN); + priv->rx_skbuff[i] = skb; + priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + priv->dma_buf_sz, + DMA_FROM_DEVICE); + + p->des2 = priv->rx_skbuff_dma[i]; + + if ((priv->mode == STMMAC_RING_MODE) && + (priv->dma_buf_sz == BUF_SIZE_16KiB)) + priv->hw->ring->init_desc3(p); + + return 0; +} + /** * init_dma_desc_rings - init the RX/TX descriptor rings * @dev: net device structure @@ -511,75 +951,70 @@ static void init_dma_desc_rings(struct net_device *dev) { int i; struct stmmac_priv *priv = netdev_priv(dev); - struct sk_buff *skb; unsigned int txsize = priv->dma_tx_size; unsigned int rxsize = priv->dma_rx_size; - unsigned int bfsize; - int dis_ic = 0; - int des3_as_data_buf = 0; + unsigned int bfsize = 0; /* Set the max 
buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize. */ - bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); + if (priv->mode == STMMAC_RING_MODE) + bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); - if (bfsize == BUF_SIZE_16KiB) - des3_as_data_buf = 1; - else + if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", txsize, rxsize, bfsize); - priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), - GFP_KERNEL); - priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), - GFP_KERNEL); - priv->dma_rx = - (struct dma_desc *)dma_alloc_coherent(priv->device, - rxsize * + if (priv->extend_desc) { + priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * + sizeof(struct + dma_extended_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + priv->dma_etx = dma_alloc_coherent(priv->device, txsize * + sizeof(struct + dma_extended_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + if ((!priv->dma_erx) || (!priv->dma_etx)) + return; + } else { + priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc), &priv->dma_rx_phy, GFP_KERNEL); - priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), - GFP_KERNEL); - priv->dma_tx = - (struct dma_desc *)dma_alloc_coherent(priv->device, - txsize * + priv->dma_tx = dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); - - if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { - pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__); - return; + if ((!priv->dma_rx) || (!priv->dma_tx)) + return; } - DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, " - "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", - dev->name, priv->dma_rx, priv->dma_tx, - (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); + priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + GFP_KERNEL); + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + GFP_KERNEL); + priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), + GFP_KERNEL); + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + GFP_KERNEL); + if (netif_msg_drv(priv)) + pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, + (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); /* RX INITIALIZATION */ - DBG(probe, INFO, "stmmac: SKB addresses:\n" - "skb\t\tskb data\tdma data\n"); - + DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n"); for (i = 0; i < rxsize; i++) { - struct dma_desc *p = priv->dma_rx + i; + struct dma_desc *p; + if (priv->extend_desc) + p = &((priv->dma_erx + i)->basic); + else + p = priv->dma_rx + i; - skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN, - GFP_KERNEL); - if (unlikely(skb == NULL)) { - pr_err("%s: Rx init fails; skb is NULL\n", __func__); + if (stmmac_init_rx_buffers(priv, p, i)) break; - } - skb_reserve(skb, NET_IP_ALIGN); - priv->rx_skbuff[i] = skb; - priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, - bfsize, DMA_FROM_DEVICE); - - p->des2 = priv->rx_skbuff_dma[i]; - - priv->hw->ring->init_desc3(des3_as_data_buf, p); DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); @@ -589,32 +1024,40 @@ static void init_dma_desc_rings(struct net_device *dev) priv->dma_buf_sz = bfsize; buf_sz = bfsize; + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) { + 
priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, + rxsize, 1); + priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, + txsize, 1); + } else { + priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, + rxsize, 0); + priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, + txsize, 0); + } + } + /* TX INITIALIZATION */ for (i = 0; i < txsize; i++) { + struct dma_desc *p; + if (priv->extend_desc) + p = &((priv->dma_etx + i)->basic); + else + p = priv->dma_tx + i; + p->des2 = 0; + priv->tx_skbuff_dma[i] = 0; priv->tx_skbuff[i] = NULL; - priv->dma_tx[i].des2 = 0; } - /* In case of Chained mode this sets the des3 to the next - * element in the chain */ - priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize); - priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize); - priv->dirty_tx = 0; priv->cur_tx = 0; - if (priv->use_riwt) - dis_ic = 1; - /* Clear the Rx/Tx descriptors */ - priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic); - priv->hw->desc->init_tx_desc(priv->dma_tx, txsize); + stmmac_clear_descriptors(priv); - if (netif_msg_hw(priv)) { - pr_info("RX descriptor ring:\n"); - display_ring(priv->dma_rx, rxsize); - pr_info("TX descriptor ring:\n"); - display_ring(priv->dma_tx, txsize); - } + if (netif_msg_hw(priv)) + stmmac_display_rings(priv); } static void dma_free_rx_skbufs(struct stmmac_priv *priv) @@ -637,13 +1080,20 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) for (i = 0; i < priv->dma_tx_size; i++) { if (priv->tx_skbuff[i] != NULL) { - struct dma_desc *p = priv->dma_tx + i; - if (p->des2) - dma_unmap_single(priv->device, p->des2, + struct dma_desc *p; + if (priv->extend_desc) + p = &((priv->dma_etx + i)->basic); + else + p = priv->dma_tx + i; + + if (priv->tx_skbuff_dma[i]) + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[i], priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE); dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; + priv->tx_skbuff_dma[i] = 0; } } } @@ -656,14 +1106,24 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) /* Free the region of consistent memory previously allocated for * the DMA */ - dma_free_coherent(priv->device, - priv->dma_tx_size * sizeof(struct dma_desc), - priv->dma_tx, priv->dma_tx_phy); - dma_free_coherent(priv->device, - priv->dma_rx_size * sizeof(struct dma_desc), - priv->dma_rx, priv->dma_rx_phy); + if (!priv->extend_desc) { + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + } else { + dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_extended_desc), + priv->dma_etx, priv->dma_tx_phy); + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + } kfree(priv->rx_skbuff_dma); kfree(priv->rx_skbuff); + kfree(priv->tx_skbuff_dma); kfree(priv->tx_skbuff); } @@ -708,13 +1168,18 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) int last; unsigned int entry = priv->dirty_tx % txsize; struct sk_buff *skb = priv->tx_skbuff[entry]; - struct dma_desc *p = priv->dma_tx + entry; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *) (priv->dma_etx + entry); + else + p = priv->dma_tx + entry; /* Check if the descriptor is owned by the DMA. 
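 * The OWN bit is the hand-off flag between driver and hardware: while it is * still set the DMA may not have written the descriptor back yet, so the * cleanup loop must stop at the first owned entry.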
*/ if (priv->hw->desc->get_tx_owner(p)) break; - /* Verify tx error by looking at the last segment */ + /* Verify tx error by looking at the last segment. */ last = priv->hw->desc->get_tx_ls(p); if (likely(last)) { int tx_error = @@ -726,22 +1191,27 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->xstats.tx_pkt_n++; } else priv->dev->stats.tx_errors++; + + stmmac_get_tx_hwtstamp(priv, entry, skb); } TX_DBG("%s: curr %d, dirty %d\n", __func__, priv->cur_tx, priv->dirty_tx); - if (likely(p->des2)) - dma_unmap_single(priv->device, p->des2, + if (likely(priv->tx_skbuff_dma[entry])) { + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[entry], priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE); - priv->hw->ring->clean_desc3(p); + priv->tx_skbuff_dma[entry] = 0; + } + priv->hw->ring->clean_desc3(priv, p); if (likely(skb != NULL)) { dev_kfree_skb(skb); priv->tx_skbuff[entry] = NULL; } - priv->hw->desc->release_tx_desc(p); + priv->hw->desc->release_tx_desc(p, priv->mode); priv->dirty_tx++; } @@ -782,11 +1252,21 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) */ static void stmmac_tx_err(struct stmmac_priv *priv) { + int i; + int txsize = priv->dma_tx_size; netif_stop_queue(priv->dev); priv->hw->dma->stop_tx(priv->ioaddr); dma_free_tx_skbufs(priv); - priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); + for (i = 0; i < txsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->mode, + (i == txsize - 1)); + else + priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->mode, + (i == txsize - 1)); priv->dirty_tx = 0; priv->cur_tx = 0; priv->hw->dma->start_tx(priv->ioaddr); @@ -860,6 +1340,14 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) { if (priv->plat->enh_desc) { pr_info(" Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + pr_info("\tEnabled extended descriptors\n"); + priv->extend_desc = 1; + } else + pr_warn("Extended descriptors not supported\n"); + priv->hw->desc = &enh_desc_ops; } else { pr_info(" Normal descriptors\n"); @@ -946,6 +1434,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; int mixed_burst = 0; + int atds = 0; /* Some DMA parameters can be passed from the platform; * in case of these are not passed we keep a default @@ -957,9 +1446,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) burst_len = priv->plat->dma_cfg->burst_len; } + if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) + atds = 1; + return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, burst_len, priv->dma_tx_phy, - priv->dma_rx_phy); + priv->dma_rx_phy, atds); } /** @@ -1012,10 +1504,13 @@ static int stmmac_open(struct net_device *dev) stmmac_check_ether_addr(priv); - ret = stmmac_init_phy(dev); - if (unlikely(ret)) { - pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - goto open_error; + if (!priv->pcs) { + ret = stmmac_init_phy(dev); + if (ret) { + pr_err("%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + goto open_error; + } } /* Create and initialize the TX/RX descriptors chains. 
*/ @@ -1084,6 +1579,10 @@ static int stmmac_open(struct net_device *dev) stmmac_mmc_setup(priv); + ret = stmmac_init_ptp(priv); + if (ret) + pr_warn("%s: failed PTP initialisation\n", __func__); + #ifdef CONFIG_STMMAC_DEBUG_FS ret = stmmac_init_fs(dev); if (ret < 0) @@ -1104,7 +1603,12 @@ static int stmmac_open(struct net_device *dev) phy_start(priv->phydev); priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; - priv->eee_enabled = stmmac_eee_init(priv); + + /* Using PCS we cannot deal with the PHY registers at this stage + * so we do not support extra features like EEE. + */ + if (!priv->pcs) + priv->eee_enabled = stmmac_eee_init(priv); stmmac_init_tx_coalesce(priv); @@ -1113,6 +1617,9 @@ static int stmmac_open(struct net_device *dev) priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); } + if (priv->pcs && priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->ioaddr, 0); + napi_enable(&priv->napi); netif_start_queue(dev); @@ -1184,6 +1691,8 @@ static int stmmac_release(struct net_device *dev) #endif clk_disable_unprepare(priv->stmmac_clk); + stmmac_release_ptp(priv); + return 0; } @@ -1198,7 +1707,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); unsigned int txsize = priv->dma_tx_size; unsigned int entry; - int i, csum_insertion = 0; + int i, csum_insertion = 0, is_jumbo = 0; int nfrags = skb_shinfo(skb)->nr_frags; struct dma_desc *desc, *first; unsigned int nopaged_len = skb_headlen(skb); @@ -1233,7 +1742,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); - desc = priv->dma_tx + entry; + if (priv->extend_desc) + desc = (struct dma_desc *) (priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + first = desc; #ifdef STMMAC_XMIT_DEBUG @@ -1244,28 +1757,46 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) #endif priv->tx_skbuff[entry] = skb; - if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) { - entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion); - desc = priv->dma_tx + entry; + /* To program the descriptors according to the size of the frame */ + if (priv->mode == STMMAC_RING_MODE) { + is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, + priv->plat->enh_desc); + if (unlikely(is_jumbo)) + entry = priv->hw->ring->jumbo_frm(priv, skb, + csum_insertion); + } else { + is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, + priv->plat->enh_desc); + if (unlikely(is_jumbo)) + entry = priv->hw->chain->jumbo_frm(priv, skb, + csum_insertion); + } + if (likely(!is_jumbo)) { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, - csum_insertion); - } + csum_insertion, priv->mode); + } else + desc = first; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); entry = (++priv->cur_tx) % txsize; - desc = priv->dma_tx + entry; + if (priv->extend_desc) + desc = (struct dma_desc *) (priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; TX_DBG("\t[entry %d] segment len: %d\n", entry, len); desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry] = desc->des2; priv->tx_skbuff[entry] = NULL; - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, + priv->mode); wmb();
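/* The wmb() above orders the descriptor writes (buffer address, length, control bits) before the OWN bit hand-off performed by set_tx_owner() below, so the DMA engine cannot fetch a half-initialised descriptor. */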
priv->hw->desc->set_tx_owner(desc); wmb(); @@ -1302,7 +1833,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) "first=%p, nfrags=%d\n", (priv->cur_tx % txsize), (priv->dirty_tx % txsize), entry, first, nfrags); - display_ring(priv->dma_tx, txsize); + if (priv->extend_desc) + stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + else + stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } @@ -1314,7 +1849,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - skb_tx_timestamp(skb); + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->enable_tx_timestamp(first); + } + + if (!priv->hwts_tx_en) + skb_tx_timestamp(skb); priv->hw->dma->enable_dma_transmission(priv->ioaddr); @@ -1327,10 +1870,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) { unsigned int rxsize = priv->dma_rx_size; int bfsize = priv->dma_buf_sz; - struct dma_desc *p = priv->dma_rx; for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { unsigned int entry = priv->dirty_rx % rxsize; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *) (priv->dma_erx + entry); + else + p = priv->dma_rx + entry; + if (likely(priv->rx_skbuff[entry] == NULL)) { struct sk_buff *skb; @@ -1344,15 +1893,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) dma_map_single(priv->device, skb->data, bfsize, DMA_FROM_DEVICE); - (p + entry)->des2 = priv->rx_skbuff_dma[entry]; + p->des2 = priv->rx_skbuff_dma[entry]; - if (unlikely(priv->plat->has_gmac)) - priv->hw->ring->refill_desc3(bfsize, p + entry); + priv->hw->ring->refill_desc3(priv, p); RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); } wmb(); - priv->hw->desc->set_rx_owner(p + entry); + priv->hw->desc->set_rx_owner(p); wmb(); } } @@ -1363,33 +1911,61 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) unsigned int entry = priv->cur_rx % rxsize; unsigned int next_entry; unsigned int count = 0; - struct dma_desc *p = priv->dma_rx + entry; - struct dma_desc *p_next; #ifdef STMMAC_RX_DEBUG if (netif_msg_hw(priv)) { pr_debug(">>> stmmac_rx: descriptor ring:\n"); - display_ring(priv->dma_rx, rxsize); + if (priv->extend_desc) + stmmac_display_ring((void *) priv->dma_erx, rxsize, 1); + else + stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); } #endif - while (!priv->hw->desc->get_rx_owner(p)) { + while (count < limit) { int status; + struct dma_desc *p, *p_next; + + if (priv->extend_desc) + p = (struct dma_desc *) (priv->dma_erx + entry); + else + p = priv->dma_rx + entry ; - if (count >= limit) + if (priv->hw->desc->get_rx_owner(p)) break; count++; next_entry = (++priv->cur_rx) % rxsize; - p_next = priv->dma_rx + next_entry; + if (priv->extend_desc) + p_next = (struct dma_desc *) (priv->dma_erx + + next_entry); + else + p_next = priv->dma_rx + next_entry; + prefetch(p_next); /* read the status of the incoming frame */ - status = (priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p)); - if (unlikely(status == discard_frame)) + status = priv->hw->desc->rx_status(&priv->dev->stats, + &priv->xstats, p); + if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) + priv->hw->desc->rx_extended_status(&priv->dev->stats, + &priv->xstats, + priv->dma_erx + + entry); + if (unlikely(status == discard_frame)) { 
priv->dev->stats.rx_errors++; - else { + if (priv->hwts_rx_en && !priv->extend_desc) { + /* DESC2 & DESC3 will be overwritten by device + * with timestamp value, hence reinitialize + * them in stmmac_rx_refill() function so that + * the device can reuse them. + */ + priv->rx_skbuff[entry] = NULL; + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, DMA_FROM_DEVICE); + } + } else { struct sk_buff *skb; int frame_len; @@ -1418,6 +1994,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) prefetch(skb->data - NET_IP_ALIGN); priv->rx_skbuff[entry] = NULL; + stmmac_get_rx_hwtstamp(priv, entry, skb); + skb_put(skb, frame_len); dma_unmap_single(priv->device, priv->rx_skbuff_dma[entry], @@ -1441,7 +2019,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) priv->dev->stats.rx_bytes += frame_len; } entry = next_entry; - p = p_next; /* use prefetched values */ } stmmac_rx_refill(priv); @@ -1604,30 +2181,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) /* To handle GMAC own interrupts */ if (priv->plat->has_gmac) { int status = priv->hw->mac->host_irq_status((void __iomem *) - dev->base_addr); + dev->base_addr, + &priv->xstats); if (unlikely(status)) { - if (status & core_mmc_tx_irq) - priv->xstats.mmc_tx_irq_n++; - if (status & core_mmc_rx_irq) - priv->xstats.mmc_rx_irq_n++; - if (status & core_mmc_rx_csum_offload_irq) - priv->xstats.mmc_rx_csum_offload_irq_n++; - if (status & core_irq_receive_pmt_irq) - priv->xstats.irq_receive_pmt_irq_n++; - /* For LPI we need to save the tx status */ - if (status & core_irq_tx_path_in_lpi_mode) { - priv->xstats.irq_tx_path_in_lpi_mode_n++; + if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) priv->tx_path_in_lpi_mode = true; - } - if (status & core_irq_tx_path_exit_lpi_mode) { - priv->xstats.irq_tx_path_exit_lpi_mode_n++; + if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; - } - if (status & core_irq_rx_path_in_lpi_mode) - priv->xstats.irq_rx_path_in_lpi_mode_n++; - if (status & core_irq_rx_path_exit_lpi_mode) - priv->xstats.irq_rx_path_exit_lpi_mode_n++; } } @@ -1655,21 +2216,30 @@ static void stmmac_poll_controller(struct net_device *dev) * a proprietary structure used to pass information to the driver. * @cmd: IOCTL command * Description: - * Currently there are no special functionality supported in IOCTL, just the - * phy_mii_ioctl(...) can be invoked. + * Currently it supports just the phy_mii_ioctl(...) and HW time stamping.
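 * SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG are forwarded to phy_mii_ioctl(), * while SIOCSHWTSTAMP is handled by stmmac_hwtstamp_ioctl().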
*/ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct stmmac_priv *priv = netdev_priv(dev); - int ret; + int ret = -EOPNOTSUPP; if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) - return -EINVAL; - - ret = phy_mii_ioctl(priv->phydev, rq, cmd); + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (!priv->phydev) + return -EINVAL; + ret = phy_mii_ioctl(priv->phydev, rq, cmd); + break; + case SIOCSHWTSTAMP: + ret = stmmac_hwtstamp_ioctl(dev, rq); + break; + default: + break; + } return ret; } @@ -1679,40 +2249,51 @@ static struct dentry *stmmac_fs_dir; static struct dentry *stmmac_rings_status; static struct dentry *stmmac_dma_cap; -static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) +static void sysfs_display_ring(void *head, int size, int extend_desc, + struct seq_file *seq) { - struct tmp_s { - u64 a; - unsigned int b; - unsigned int c; - }; int i; - struct net_device *dev = seq->private; - struct stmmac_priv *priv = netdev_priv(dev); - - seq_printf(seq, "=======================\n"); - seq_printf(seq, " RX descriptor ring\n"); - seq_printf(seq, "=======================\n"); + struct dma_extended_desc *ep = (struct dma_extended_desc *) head; + struct dma_desc *p = (struct dma_desc *) head; - for (i = 0; i < priv->dma_rx_size; i++) { - struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i); - seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", - i, (unsigned int)(x->a), - (unsigned int)((x->a) >> 32), x->b, x->c); + for (i = 0; i < size; i++) { + u64 x; + if (extend_desc) { + x = *(u64 *) ep; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int) virt_to_phys(ep), + (unsigned int) x, (unsigned int) (x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } else { + x = *(u64 *) p; + seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int) virt_to_phys(p), + (unsigned int) x, (unsigned int) (x >> 32), + p->des2, p->des3); + p++; + } seq_printf(seq, "\n"); } +} - seq_printf(seq, "\n"); - seq_printf(seq, "=======================\n"); - seq_printf(seq, " TX descriptor ring\n"); - seq_printf(seq, "=======================\n"); +static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; - for (i = 0; i < priv->dma_tx_size; i++) { - struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i); - seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", - i, (unsigned int)(x->a), - (unsigned int)((x->a) >> 32), x->b, x->c); - seq_printf(seq, "\n"); + if (priv->extend_desc) { + seq_printf(seq, "Extended RX descriptor ring:\n"); + sysfs_display_ring((void *) priv->dma_erx, rxsize, 1, seq); + seq_printf(seq, "Extended TX descriptor ring:\n"); + sysfs_display_ring((void *) priv->dma_etx, txsize, 1, seq); + } else { + seq_printf(seq, "RX descriptor ring:\n"); + sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq); + seq_printf(seq, "TX descriptor ring:\n"); + sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq); } return 0; @@ -1877,7 +2458,7 @@ static const struct net_device_ops stmmac_netdev_ops = { */ static int stmmac_hw_init(struct stmmac_priv *priv) { - int ret = 0; + int ret; struct mac_device_info *mac; /* Identify the MAC HW device */ @@ -1892,12 +2473,23 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->hw = mac; - /* To use the chained or ring mode */ - priv->hw->ring = 
&ring_mode_ops; - /* Get and dump the chip ID */ priv->synopsys_id = stmmac_get_synopsys_id(priv); + /* To use alternate (extended) or normal descriptor structures */ + stmmac_selec_desc_mode(priv); + + /* To use the chained or ring mode */ + if (chain_mode) { + priv->hw->chain = &chain_mode_ops; + pr_info(" Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + } else { + priv->hw->ring = &ring_mode_ops; + pr_info(" Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + } + /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); if (priv->hw_cap_support) { @@ -1921,9 +2513,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv) } else pr_info(" No HW DMA feature register supported"); - /* Select the enhnaced/normal descriptor structures */ - stmmac_selec_desc_mode(priv); - /* Enable the IPC (Checksum Offload) and check if the feature has been * enabled during the core configuration. */ ret = priv->hw->mac->rx_ipc(priv->ioaddr); @@ -1943,7 +2532,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) device_set_wakeup_capable(priv->device, 1); } - return ret; + return 0; } /** @@ -1989,7 +2578,9 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, priv->plat->phy_addr = phyaddr; /* Init MAC and get the capabilities */ - stmmac_hw_init(priv); + ret = stmmac_hw_init(priv); + if (ret) + goto error_free_netdev; ndev->netdev_ops = &stmmac_netdev_ops; @@ -2044,12 +2635,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, else priv->clk_csr = priv->plat->clk_csr; - /* MDIO bus Registration */ - ret = stmmac_mdio_register(ndev); - if (ret < 0) { - pr_debug("%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); - goto error_mdio_register; + stmmac_check_pcs_mode(priv); + + if (!priv->pcs) { + /* MDIO bus Registration */ + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + pr_debug("%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); + goto error_mdio_register; + } } return priv; @@ -2060,6 +2655,7 @@ error_clk_get: unregister_netdev(ndev); error_netdev_register: netif_napi_del(&priv->napi); +error_free_netdev: free_netdev(ndev); return NULL; @@ -2081,7 +2677,8 @@ int stmmac_dvr_remove(struct net_device *ndev) priv->hw->dma->stop_tx(priv->ioaddr); stmmac_set_mac(priv->ioaddr, false); - stmmac_mdio_unregister(ndev); + if (!priv->pcs) + stmmac_mdio_unregister(ndev); netif_carrier_off(ndev); unregister_netdev(ndev); free_netdev(ndev); @@ -2093,7 +2690,6 @@ int stmmac_dvr_remove(struct net_device *ndev) int stmmac_suspend(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); - int dis_ic = 0; unsigned long flags; if (!ndev || !netif_running(ndev)) @@ -2107,18 +2703,13 @@ int stmmac_suspend(struct net_device *ndev) netif_device_detach(ndev); netif_stop_queue(ndev); - if (priv->use_riwt) - dis_ic = 1; - napi_disable(&priv->napi); /* Stop TX/RX DMA */ priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); - /* Clear the Rx/Tx descriptors */ - priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, - dis_ic); - priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); + + stmmac_clear_descriptors(priv); /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) @@ -2257,6 +2848,9 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; + } else if (!strncmp(opt, "chain_mode:", 11)) { + if 
(kstrtoint(opt + 11, 0, &chain_mode)) + goto err; } } return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c new file mode 100644 index 000000000000..93d4beff92c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -0,0 +1,215 @@ +/******************************************************************************* + PTP 1588 clock using the STMMAC. + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> +*******************************************************************************/ +#include "stmmac.h" +#include "stmmac_ptp.h" + +/** + * stmmac_adjust_freq + * + * @ptp: pointer to ptp_clock_info structure + * @ppb: desired period change in parts per billion + * + * Description: this function will adjust the frequency of the hardware clock. + */ +static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 diff, addend; + int neg_adj = 0; + u64 adj; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + addend = priv->default_addend; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + addend = neg_adj ? (addend - diff) : (addend + diff); + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->config_addend(priv->ioaddr, addend); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_adjust_time + * + * @ptp: pointer to ptp_clock_info structure + * @delta: desired change in nanoseconds + * + * Description: this function will shift/adjust the hardware clock time. + */ +static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_get_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: pointer to hold time/result + * + * Description: this function will read the current time from the + * hardware clock and store it in @ts.
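+ * The 64-bit nanosecond counter read via get_systime() is split into the + * seconds and nanoseconds fields of @ts with div_u64_rem().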
+ */ +static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u64 ns; + u32 reminder; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + ns = priv->hw->ptp->get_systime(priv->ioaddr); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder); + ts->tv_nsec = reminder; + + return 0; +} + +/** + * stmmac_set_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: time value to set + * + * Description: this function will set the current time on the + * hardware clock. + */ +static int stmmac_set_time(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +static int stmmac_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info stmmac_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac_ptp_clock", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfreq = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime = stmmac_get_time, + .settime = stmmac_set_time, + .enable = stmmac_enable, +}; + +/** + * stmmac_ptp_register + * + * @priv: driver private structure + * + * Description: this function will register the ptp clock driver + * to the kernel. It also does some housekeeping work. + */ +int stmmac_ptp_register(struct stmmac_priv *priv) +{ + spin_lock_init(&priv->ptp_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; + + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, + priv->device); + if (IS_ERR(priv->ptp_clock)) { + priv->ptp_clock = NULL; + pr_err("ptp_clock_register() failed on %s\n", priv->dev->name); + } else + pr_debug("Added PTP HW clock successfully on %s\n", + priv->dev->name); + + return 0; +} + +/** + * stmmac_ptp_unregister + * + * @priv: driver private structure + * + * Description: this function will remove/unregister the ptp clock driver + * from the kernel. + */ +void stmmac_ptp_unregister(struct stmmac_priv *priv) +{ + if (priv->ptp_clock) { + ptp_clock_unregister(priv->ptp_clock); + pr_debug("Removed PTP HW clock successfully on %s\n", + priv->dev->name); + } +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h new file mode 100644 index 000000000000..3dbc047622fa --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -0,0 +1,74 @@ +/****************************************************************************** + PTP Header file + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details.
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> +******************************************************************************/ + +#ifndef __STMMAC_PTP_H__ +#define __STMMAC_PTP_H__ + +#define STMMAC_SYSCLOCK 62500000 + +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x0700 /* Timestamp Control Reg */ +#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x0708 /* System Time - Seconds Reg */ +#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */ +#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */ +#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */ +#define PTP_TAR 0x0718 /* Timestamp Addend Reg */ +#define PTP_TTSR 0x071C /* Target Time Seconds Reg */ +#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ +#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ +#define PTP_TSR 0x0728 /* Timestamp Status */ + +#define PTP_STNSUR_ADDSUB_SHIFT 31 + +/* PTP TCR defines */ +#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ +#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ +#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ +#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ +/* Timestamp Interrupt Trigger Enable */ +#define PTP_TCR_TSTRIG 0x00000010 +#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ +#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ +/* Timestamp Digital or Binary Rollover Control */ +#define PTP_TCR_TSCTRLSSR 0x00000200 + +/* Enable PTP packet Processing for Version 2 Format */ +#define PTP_TCR_TSVER2ENA 0x00000400 +/* Enable Processing of PTP over Ethernet Frames */ +#define PTP_TCR_TSIPENA 0x00000800 +/* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define PTP_TCR_TSIPV6ENA 0x00001000 +/* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define PTP_TCR_TSIPV4ENA 0x00002000 +/* Enable Timestamp Snapshot for Event Messages */ +#define PTP_TCR_TSEVNTENA 0x00004000 +/* Enable Snapshot for Messages Relevant to Master */ +#define PTP_TCR_TSMSTRENA 0x00008000 +/* Select PTP packets for Taking Snapshots */ +#define PTP_TCR_SNAPTYPSEL_1 0x00010000 +/* Enable MAC address for PTP Frame Filtering */ +#define PTP_TCR_TSENMACADDR 0x00040000 + +#endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e4c1c88e4c2a..95cff98d8a34 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, (len << TXHDR_LEN_SHIFT) | ((l3off / 2) << TXHDR_L3START_SHIFT) | (ihl << TXHDR_IHL_SHIFT) | - ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | + ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) | ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | (ipv6 ?
TXHDR_IP_VER : 0) | csum_bits); diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 5fafca065305..054975939a18 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op, bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, PAGE_SIZE, &bp->bblock_dvma, GFP_ATOMIC); - if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { - printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); + if (bp->bmac_block == NULL || bp->bblock_dvma == 0) goto fail_and_cleanup; - } /* Get the board revision of this BigMAC. */ bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index a1bff49a8155..436fa9d5a071 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) &hp->hblock_dvma, GFP_ATOMIC); err = -ENOMEM; - if (!hp->happy_block) { - printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n"); + if (!hp->happy_block) goto err_out_iounmap; - } /* Force check of the link first time we are brought up. */ hp->linkcheck = 0; @@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, hp->happy_bursts = DMA_BURSTBITS; #endif - hp->happy_block = (struct hmeal_init_block *) - dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); - + hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &hp->hblock_dvma, GFP_KERNEL); err = -ENODEV; - if (!hp->happy_block) { - printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n"); + if (!hp->happy_block) goto err_out_iounmap; - } hp->linkcheck = 0; hp->timer_state = asleep; diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 49bf3e2eb652..8182591bc187 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep) struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; - int elem = qep->rx_new, drops = 0; + int elem = qep->rx_new; u32 flags; this = &rxbase[elem]; @@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep) } else { skb = netdev_alloc_skb(dev, len + 2); if (skb == NULL) { - drops++; dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); @@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep) this = &rxbase[elem]; } qep->rx_new = elem; - if (drops) - printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name); } static void qe_tx_reclaim(struct sunqe *qep); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index e15cc71b826d..e8824cea093b 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f) dno = bdx_rxdb_available(db) - 1; while (dno > 0) { skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN); - if (!skb) { - pr_err("NO MEM: netdev_alloc_skb failed\n"); + if (!skb) break; - } + skb_reserve(skb, NET_IP_ALIGN); idx = bdx_rxdb_alloc_elem(db); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 80cad06e5eb2..1d740423a053 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -126,6 +126,13 @@ do { \ #define CPSW_FIFO_DUAL_MAC_MODE 
(1 << 15) #define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) +#define CPSW_INTPACEEN (0x3f << 16) +#define CPSW_INTPRESCALE_MASK (0x7FF << 0) +#define CPSW_CMINTMAX_CNT 63 +#define CPSW_CMINTMIN_CNT 2 +#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) +#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) + #define cpsw_enable_irq(priv) \ do { \ u32 i; \ @@ -139,6 +146,10 @@ do { \ disable_irq_nosync(priv->irqs_table[i]); \ } while (0); +#define cpsw_slave_index(priv) \ + ((priv->data.dual_emac) ? priv->emac_port : \ + priv->data.active_slave) + static int debug_level; module_param(debug_level, int, 0); MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); @@ -160,6 +171,15 @@ struct cpsw_wr_regs { u32 rx_en; u32 tx_en; u32 misc_en; + u32 mem_allign1[8]; + u32 rx_thresh_stat; + u32 rx_stat; + u32 tx_stat; + u32 misc_stat; + u32 mem_allign2[8]; + u32 rx_imax; + u32 tx_imax; + }; struct cpsw_ss_regs { @@ -314,6 +334,8 @@ struct cpsw_priv { struct cpsw_host_regs __iomem *host_port_regs; u32 msg_enable; u32 version; + u32 coal_intvl; + u32 bus_freq_mhz; struct net_device_stats stats; int rx_packet_max; int host_port; @@ -612,6 +634,77 @@ static void cpsw_adjust_link(struct net_device *ndev) } } +static int cpsw_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 0; +} + +static int cpsw_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + u32 int_ctrl; + u32 num_interrupts = 0; + u32 prescale = 0; + u32 addnl_dvdr = 1; + u32 coal_intvl = 0; + + if (!coal->rx_coalesce_usecs) + return -EINVAL; + + coal_intvl = coal->rx_coalesce_usecs; + + int_ctrl = readl(&priv->wr_regs->int_control); + prescale = priv->bus_freq_mhz * 4; + + if (coal_intvl < CPSW_CMINTMIN_INTVL) + coal_intvl = CPSW_CMINTMIN_INTVL; + + if (coal_intvl > CPSW_CMINTMAX_INTVL) { + /* Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. 
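+ * The prescale value programmed below is the pulse length in bus clocks + * (bus_freq_mhz * 4 gives a 4us pulse); rx_imax/tx_imax then bound how + * many interrupts may fire per 1000us window, scaled by the extra divider.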
+ */ + addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) + coal_intvl = (CPSW_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = CPSW_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + writel(num_interrupts, &priv->wr_regs->rx_imax); + writel(num_interrupts, &priv->wr_regs->tx_imax); + + int_ctrl |= CPSW_INTPACEEN; + int_ctrl &= (~CPSW_INTPRESCALE_MASK); + int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); + writel(int_ctrl, &priv->wr_regs->int_control); + + cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + priv = netdev_priv(priv->slaves[i].ndev); + priv->coal_intvl = coal_intvl; + } + } else { + priv->coal_intvl = coal_intvl; + } + + return 0; +} + static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) { static char *leader = "........................................"; @@ -834,6 +927,14 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); } + /* Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + cpsw_set_coalesce(ndev, &coal); + } + cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); napi_enable(&priv->napi); @@ -942,7 +1043,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags) static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { - struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; + struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; u32 ts_en, seq_id; if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { @@ -971,7 +1072,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) if (priv->data.dual_emac) slave = &priv->slaves[priv->emac_port]; else - slave = &priv->slaves[priv->data.cpts_active_slave]; + slave = &priv->slaves[priv->data.active_slave]; ctrl = slave_read(slave, CPSW2_CONTROL); ctrl &= ~CTRL_ALL_TS_MASK; @@ -1056,14 +1157,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { + struct cpsw_priv *priv = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(req); + int slave_no = cpsw_slave_index(priv); + if (!netif_running(dev)) return -EINVAL; + switch (cmd) { #ifdef CONFIG_TI_CPTS - if (cmd == SIOCSHWTSTAMP) + case SIOCSHWTSTAMP: return cpsw_hwtstamp_ioctl(dev, req); #endif - return -ENOTSUPP; + case SIOCGMIIPHY: + data->phy_id = priv->slaves[slave_no].phy->addr; + break; + default: + return -ENOTSUPP; + } + + return 0; } static void cpsw_ndo_tx_timeout(struct net_device *ndev) @@ -1244,12 +1357,39 @@ static int cpsw_get_ts_info(struct net_device *ndev, return 0; } +static int cpsw_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} + +static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} 
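As a sanity check on the pacing arithmetic above, here is a minimal standalone sketch (not part of the patch) that reruns cpsw_set_coalesce()'s computation in user space. The 250 MHz bus clock and the helper name pacing_params are assumptions for illustration; the constants mirror the CPSW_CMINT*/CPSW_INTPRESCALE_MASK defines earlier in this file.

#include <stdio.h>

#define INTPRESCALE_MASK	0x7FF
#define CMINTMAX_CNT		63
#define CMINTMIN_CNT		2
#define CMINTMAX_INTVL		(1000 / CMINTMIN_CNT)		/* 500 us */
#define CMINTMIN_INTVL		((1000 / CMINTMAX_CNT) + 1)	/* 16 us */

/* pacing_params() is a hypothetical standalone rework of the in-kernel
 * arithmetic in cpsw_set_coalesce(). */
static void pacing_params(unsigned int usecs, unsigned int bus_freq_mhz)
{
	unsigned int prescale = bus_freq_mhz * 4;	/* one 4us pulse */
	unsigned int addnl_dvdr = 1;

	if (usecs < CMINTMIN_INTVL)
		usecs = CMINTMIN_INTVL;
	if (usecs > CMINTMAX_INTVL) {
		/* dilate the 4us pulse for long intervals */
		addnl_dvdr = INTPRESCALE_MASK / prescale;
		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (usecs > CMINTMAX_INTVL * addnl_dvdr)
				usecs = CMINTMAX_INTVL * addnl_dvdr;
		} else {
			addnl_dvdr = 1;
			usecs = CMINTMAX_INTVL;
		}
	}
	/* interrupts allowed per (dilated) 1000us window -> rx/tx_imax */
	printf("%u usecs -> imax=%u, prescale=0x%x\n",
	       usecs, (1000 * addnl_dvdr) / usecs, prescale);
}

int main(void)
{
	pacing_params(100, 250);	/* 100 usecs -> imax=10, prescale=0x3e8 */
	pacing_params(1000, 250);	/* 1000 usecs -> imax=2, prescale=0x7d0 */
	return 0;
}

In the driver path the same values land in rx_imax/tx_imax and in the INTPRESCALE field of int_control.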
+ static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, .set_msglevel = cpsw_set_msglevel, .get_link = ethtool_op_get_link, .get_ts_info = cpsw_get_ts_info, + .get_settings = cpsw_get_settings, + .set_settings = cpsw_set_settings, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1282,12 +1422,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->slaves = prop; - if (of_property_read_u32(node, "cpts_active_slave", &prop)) { - pr_err("Missing cpts_active_slave property in the DT.\n"); + if (of_property_read_u32(node, "active_slave", &prop)) { + pr_err("Missing active_slave property in the DT.\n"); ret = -EINVAL; goto error_ret; } - data->cpts_active_slave = prop; + data->active_slave = prop; if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { pr_err("Missing cpts_clock_mult property in the DT.\n"); @@ -1437,6 +1577,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->slaves = priv->slaves; priv_sl2->clk = priv->clk; + priv_sl2->coal_intvl = 0; + priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; + priv_sl2->cpsw_res = priv->cpsw_res; priv_sl2->regs = priv->regs; priv_sl2->host_port = priv->host_port; @@ -1546,6 +1689,8 @@ static int cpsw_probe(struct platform_device *pdev) ret = -ENODEV; goto clean_slave_ret; } + priv->coal_intvl = 0; + priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!priv->cpsw_res) { diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 72300bc9e378..6a0b47715a84 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget) * Polled functionality used by netconsole and others in non interrupt mode * */ -void emac_poll_controller(struct net_device *ndev) +static void emac_poll_controller(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); @@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev) /* obtain emac clock from kernel */ - emac_clk = clk_get(&pdev->dev, NULL); + emac_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(emac_clk)) { dev_err(&pdev->dev, "failed to get EMAC clock\n"); return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); - clk_put(emac_clk); /* TODO: Probe PHY here if possible */ ndev = alloc_etherdev(sizeof(struct emac_priv)); - if (!ndev) { - rc = -ENOMEM; - goto no_ndev; - } + if (!ndev) + return -ENOMEM; platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); @@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); rc = -ENODEV; - goto probe_quit; + goto no_pdata; } /* MAC addr and PHY mask , RMII enable info from platform_data */ @@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!res) { dev_err(&pdev->dev,"error getting res\n"); rc = -ENOENT; - goto probe_quit; + goto no_pdata; } priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; size = resource_size(res); - if (!request_mem_region(res->start, size, ndev->name)) { + if (!devm_request_mem_region(&pdev->dev, res->start, + size, ndev->name)) { dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); rc = -ENXIO; - goto probe_quit; + goto no_pdata; } - 
priv->remap_addr = ioremap(res->start, size); + priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size); if (!priv->remap_addr) { dev_err(&pdev->dev, "unable to map IO\n"); rc = -ENOMEM; - release_mem_region(res->start, size); - goto probe_quit; + goto no_pdata; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; @@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!priv->dma) { dev_err(&pdev->dev, "error initializing DMA\n"); rc = -ENOMEM; - goto no_dma; + goto no_pdata; } priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), @@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_rx_handler); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; - goto no_irq_res; + goto no_cpdma_chan; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "error getting irq res\n"); rc = -ENOENT; - goto no_irq_res; + goto no_cpdma_chan; } ndev->irq = res->start; @@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; - goto no_irq_res; + goto no_cpdma_chan; } @@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev) return 0; -no_irq_res: +no_cpdma_chan: if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); -no_dma: - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - iounmap(priv->remap_addr); - -probe_quit: +no_pdata: free_netdev(ndev); -no_ndev: return rc; } @@ -2041,14 +2032,12 @@ no_ndev: */ static int davinci_emac_remove(struct platform_device *pdev) { - struct resource *res; struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); platform_set_drvdata(pdev, NULL); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (priv->txchan) cpdma_chan_destroy(priv->txchan); @@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); - release_mem_region(res->start, resource_size(res)); - unregister_netdev(ndev); - iounmap(priv->remap_addr); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 22725386c5de..bdda36f8e541 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1911,10 +1911,8 @@ static void tlan_reset_lists(struct net_device *dev) list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if (!skb) { - netdev_err(dev, "Out of memory for received data\n"); + if (!skb) break; - } list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 445c0595c997..ad32af67e618 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -58,13 +58,6 @@ MODULE_DESCRIPTION("Gelic Network driver"); MODULE_LICENSE("GPL"); -static inline void gelic_card_enable_rxdmac(struct gelic_card *card); -static inline void gelic_card_disable_rxdmac(struct gelic_card *card); -static inline void 
gelic_card_disable_txdmac(struct gelic_card *card); -static inline void gelic_card_reset_chain(struct gelic_card *card, - struct gelic_descr_chain *chain, - struct gelic_descr *start_descr); - /* set irq_mask */ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) { @@ -78,12 +71,12 @@ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) return status; } -static inline void gelic_card_rx_irq_on(struct gelic_card *card) +static void gelic_card_rx_irq_on(struct gelic_card *card) { card->irq_mask |= GELIC_CARD_RXINT; gelic_card_set_irq_mask(card, card->irq_mask); } -static inline void gelic_card_rx_irq_off(struct gelic_card *card) +static void gelic_card_rx_irq_off(struct gelic_card *card) { card->irq_mask &= ~GELIC_CARD_RXINT; gelic_card_set_irq_mask(card, card->irq_mask); @@ -127,6 +120,120 @@ static int gelic_card_set_link_mode(struct gelic_card *card, int mode) return 0; } +/** + * gelic_card_disable_txdmac - disables the transmit DMA controller + * @card: card structure + * + * gelic_card_disable_txdmac terminates processing on the DMA controller by + * turning off DMA and issuing a force end + */ +static void gelic_card_disable_txdmac(struct gelic_card *card) +{ + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card)); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_tx_dma failed, status=%d\n", status); +} + +/** + * gelic_card_enable_rxdmac - enables the receive DMA controller + * @card: card structure + * + * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN + * in the GDADMACCNTR register + */ +static void gelic_card_enable_rxdmac(struct gelic_card *card) +{ + int status; + +#ifdef DEBUG + if (gelic_descr_get_status(card->rx_chain.head) != + GELIC_DESCR_DMA_CARDOWNED) { + printk(KERN_ERR "%s: status=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); + printk(KERN_ERR "%s: nextphy=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->next_descr_addr)); + printk(KERN_ERR "%s: head=%p\n", __func__, + card->rx_chain.head); + } +#endif + status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), + card->rx_chain.head->bus_addr, 0); + if (status) + dev_info(ctodev(card), + "lv1_net_start_rx_dma failed, status=%d\n", status); +} + +/** + * gelic_card_disable_rxdmac - disables the receive DMA controller + * @card: card structure + * + * gelic_card_disable_rxdmac terminates processing on the DMA controller by + * turning off DMA and issuing a force end + */ +static void gelic_card_disable_rxdmac(struct gelic_card *card) +{ + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card)); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_rx_dma failed, %d\n", status); +} + +/** + * gelic_descr_set_status -- sets the status of a descriptor + * @descr: descriptor to change + * @status: status to set in the descriptor + * + * changes the status to the specified value. Doesn't change other bits + * in the status + */ +static void gelic_descr_set_status(struct gelic_descr *descr, + enum gelic_descr_dma_status status) +{ + descr->dmac_cmd_status = cpu_to_be32(status | + (be32_to_cpu(descr->dmac_cmd_status) & + ~GELIC_DESCR_DMA_STAT_MASK)); + /* + * dma_cmd_status field is used to indicate whether the descriptor + * is valid or not. + * Usually caller of this function wants to inform that to the + * hardware, so we assure here the hardware sees the change.
+ */ + wmb(); +} + +/** + * gelic_card_reset_chain - reset status of a descriptor chain + * @card: card structure + * @chain: address of chain + * @start_descr: address of descriptor array + * + * Reset the status of dma descriptors to ready state + * and re-initialize the hardware chain for later use + */ +static void gelic_card_reset_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr) +{ + struct gelic_descr *descr; + + for (descr = start_descr; start_descr != descr->next; descr++) { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); + } + + chain->head = start_descr; + chain->tail = (descr - 1); + + (descr - 1)->next_descr_addr = 0; +} + void gelic_card_up(struct gelic_card *card) { pr_debug("%s: called\n", __func__); @@ -183,29 +290,6 @@ gelic_descr_get_status(struct gelic_descr *descr) } /** - * gelic_descr_set_status -- sets the status of a descriptor - * @descr: descriptor to change - * @status: status to set in the descriptor - * - * changes the status to the specified value. Doesn't change other bits - * in the status - */ -static void gelic_descr_set_status(struct gelic_descr *descr, - enum gelic_descr_dma_status status) -{ - descr->dmac_cmd_status = cpu_to_be32(status | - (be32_to_cpu(descr->dmac_cmd_status) & - ~GELIC_DESCR_DMA_STAT_MASK)); - /* - * dma_cmd_status field is used to indicate whether the descriptor - * is valid or not. - * Usually caller of this function wants to inform that to the - * hardware, so we assure here the hardware sees the change. - */ - wmb(); -} - -/** * gelic_card_free_chain - free descriptor chain * @card: card structure * @descr_in: address of desc @@ -286,31 +370,6 @@ iommu_error: } /** - * gelic_card_reset_chain - reset status of a descriptor chain - * @card: card structure - * @chain: address of chain - * @start_descr: address of descriptor array - * - * Reset the status of dma descriptors to ready state - * and re-initialize the hardware chain for later use - */ -static void gelic_card_reset_chain(struct gelic_card *card, - struct gelic_descr_chain *chain, - struct gelic_descr *start_descr) -{ - struct gelic_descr *descr; - - for (descr = start_descr; start_descr != descr->next; descr++) { - gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); - descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); - } - - chain->head = start_descr; - chain->tail = (descr - 1); - - (descr - 1)->next_descr_addr = 0; -} -/** * gelic_descr_prepare_rx - reinitializes a rx descriptor * @card: card structure * @descr: descriptor to re-init @@ -599,71 +658,6 @@ void gelic_net_set_multi(struct net_device *netdev) } /** - * gelic_card_enable_rxdmac - enables the receive DMA controller - * @card: card structure - * - * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN - * in the GDADMACCNTR register - */ -static inline void gelic_card_enable_rxdmac(struct gelic_card *card) -{ - int status; - -#ifdef DEBUG - if (gelic_descr_get_status(card->rx_chain.head) != - GELIC_DESCR_DMA_CARDOWNED) { - printk(KERN_ERR "%s: status=%x\n", __func__, - be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); - printk(KERN_ERR "%s: nextphy=%x\n", __func__, - be32_to_cpu(card->rx_chain.head->next_descr_addr)); - printk(KERN_ERR "%s: head=%p\n", __func__, - card->rx_chain.head); - } -#endif - status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), - card->rx_chain.head->bus_addr, 0); - if (status) - dev_info(ctodev(card), - 
"lv1_net_start_rx_dma failed, status=%d\n", status); -} - -/** - * gelic_card_disable_rxdmac - disables the receive DMA controller - * @card: card structure - * - * gelic_card_disable_rxdmac terminates processing on the DMA controller by - * turing off DMA and issuing a force end - */ -static inline void gelic_card_disable_rxdmac(struct gelic_card *card) -{ - int status; - - /* this hvc blocks until the DMA in progress really stopped */ - status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card)); - if (status) - dev_err(ctodev(card), - "lv1_net_stop_rx_dma failed, %d\n", status); -} - -/** - * gelic_card_disable_txdmac - disables the transmit DMA controller - * @card: card structure - * - * gelic_card_disable_txdmac terminates processing on the DMA controller by - * turing off DMA and issuing a force end - */ -static inline void gelic_card_disable_txdmac(struct gelic_card *card) -{ - int status; - - /* this hvc blocks until the DMA in progress really stopped */ - status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card)); - if (status) - dev_err(ctodev(card), - "lv1_net_stop_tx_dma failed, status=%d\n", status); -} - -/** * gelic_net_stop - called upon ifconfig down * @netdev: interface device structure * @@ -746,7 +740,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr, } } -static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, +static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, unsigned short tag) { struct vlan_ethhdr *veth; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index f1b91fd7e41c..fef6b59e69c9 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card, alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, - &chain->dma_addr, GFP_KERNEL); - + &chain->dma_addr, GFP_KERNEL); if (!chain->hwring) return -ENOMEM; diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 8fa947a2d929..3c69a0460832 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev) data->id, dev->irq, dev->name); } - data->rxring = dma_alloc_coherent(NULL, rxring_size, - &data->rxdma, GFP_KERNEL); - - if (!data->rxring) { - printk(KERN_DEBUG - "TSI108_ETH: failed to allocate memory for rxring!\n"); + data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma, + GFP_KERNEL | __GFP_ZERO); + if (!data->rxring) return -ENOMEM; - } else { - memset(data->rxring, 0, rxring_size); - } - - data->txring = dma_alloc_coherent(NULL, txring_size, - &data->txdma, GFP_KERNEL); + data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma, + GFP_KERNEL | __GFP_ZERO); if (!data->txring) { - printk(KERN_DEBUG - "TSI108_ETH: failed to allocate memory for txring!\n"); pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); return -ENOMEM; - } else { - memset(data->txring, 0, txring_size); } for (i = 0; i < TSI108_RXRING_LEN; i++) { diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 545043cc4c0b..a518dcab396e 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -754,7 +754,7 @@ static int w5100_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static 
int w5100_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -787,7 +787,7 @@ static int w5100_resume(struct device *dev) } return 0; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 7cbd0e6fc6f3..6e00e3f94ce4 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -666,7 +666,7 @@ static int w5300_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int w5300_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -699,7 +699,7 @@ static int w5300_resume(struct device *dev) } return 0; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9fc2ada4c3c2..4a7c60f4c83d 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev) /* returns a virtual address and a physical address. */ lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, - &lp->tx_bd_p, GFP_KERNEL); - if (!lp->tx_bd_v) { - dev_err(&ndev->dev, - "unable to allocate DMA TX buffer descriptors"); + &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO); + if (!lp->tx_bd_v) goto out; - } + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * RX_BD_NUM, - &lp->rx_bd_p, GFP_KERNEL); - if (!lp->rx_bd_v) { - dev_err(&ndev->dev, - "unable to allocate DMA RX buffer descriptors"); + &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO); + if (!lp->rx_bd_v) goto out; - } - memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); for (i = 0; i < TX_BD_NUM; i++) { lp->tx_bd_v[i].next = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); } - memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM); for (i = 0; i < RX_BD_NUM; i++) { lp->rx_bd_v[i].next = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); - - if (skb == 0) { - dev_err(&ndev->dev, "alloc_skb error %d\n", i); + if (!skb) goto out; - } + lp->rx_skb[i] = skb; /* returns physical address of skb->data */ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, @@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev) new_skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE); - - if (new_skb == 0) { - dev_err(&ndev->dev, "no memory for new sk_buff\n"); + if (!new_skb) { spin_unlock_irqrestore(&lp->rx_lock, flags); return; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 278c9db3b5b8..24748e8367a1 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev) lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->tx_bd_v) * TX_BD_NUM, &lp->tx_bd_p, - GFP_KERNEL); - if (!lp->tx_bd_v) { - dev_err(&ndev->dev, "unable to allocate DMA Tx buffer " - "descriptors"); + GFP_KERNEL | __GFP_ZERO); + if (!lp->tx_bd_v) goto out; - } lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*lp->rx_bd_v) * RX_BD_NUM, &lp->rx_bd_p, - GFP_KERNEL); - if 
(!lp->rx_bd_v) { - dev_err(&ndev->dev, "unable to allocate DMA Rx buffer " - "descriptors"); + GFP_KERNEL | __GFP_ZERO); + if (!lp->rx_bd_v) goto out; - } - memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); for (i = 0; i < TX_BD_NUM; i++) { lp->tx_bd_v[i].next = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); } - memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM); for (i = 0; i < RX_BD_NUM; i++) { lp->rx_bd_v[i].next = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); - if (!skb) { - dev_err(&ndev->dev, "alloc_skb error %d\n", i); + if (!skb) goto out; - } lp->rx_bd_v[i].sw_id_offset = (u32) skb; lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, @@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev) packets++; new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); - if (!new_skb) { - dev_err(&ndev->dev, "no memory for new sk_buff\n"); + if (!new_skb) return; - } + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, DMA_FROM_DEVICE); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 98e09d0d3ce2..76210abf2e9b 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id) /* 1 extra so we can use insw */ skb = netdev_alloc_skb(dev, pktlen + 3); if (!skb) { - pr_notice("low memory, packet dropped (size=%u)\n", pktlen); dev->stats.rx_dropped++; } else { /* okay get the packet */ skb_reserve(skb, 2); |
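A closing note on the pattern the tsi108, ll_temac and axienet hunks above converge on: the open-coded memset() after dma_alloc_coherent() is folded into __GFP_ZERO, and the per-driver out-of-memory messages are dropped because the core allocators (and netdev_alloc_skb()) already warn on failure. A minimal sketch of the resulting idiom; the wrapper name ring_alloc_zeroed is hypothetical and not taken from any of these drivers:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helper showing the idiom only. */
static void *ring_alloc_zeroed(struct device *dev, size_t size,
			       dma_addr_t *handle)
{
	/* __GFP_ZERO replaces the separate memset(); no local error
	 * message is needed since the allocator warns on failure. */
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}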