Diffstat (limited to 'drivers/net/ethernet/broadcom')
 drivers/net/ethernet/broadcom/bnx2.c                | 182
 drivers/net/ethernet/broadcom/bnx2.h                |  17
 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h         |  48
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c     | 296
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h     | 106
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c     |  61
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h     |   4
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c |  14
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c    | 106
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c   |   4
 drivers/net/ethernet/broadcom/cnic.c                |   8
 drivers/net/ethernet/broadcom/sb1250-mac.c          |   3
 drivers/net/ethernet/broadcom/tg3.c                 | 249
 drivers/net/ethernet/broadcom/tg3.h                 |  11
14 files changed, 611 insertions(+), 498 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c7235804d..d573169279b7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
 	mutex_lock(&bp->cnic_lock);
 	cp->drv_state = 0;
 	bnapi->cnic_present = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 	mutex_unlock(&bp->cnic_lock);
 	synchronize_rcu();
 	return 0;
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
 
 	if (bp->autoneg & AUTONEG_SPEED) {
 		u32 adv_reg, adv1000_reg;
-		u32 new_adv_reg = 0;
-		u32 new_adv1000_reg = 0;
+		u32 new_adv = 0;
+		u32 new_adv1000 = 0;
 
 		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
 		adv1000_reg &= PHY_ALL_1000_SPEED;
 
-		if (bp->advertising & ADVERTISED_10baseT_Half)
-			new_adv_reg |= ADVERTISE_10HALF;
-		if (bp->advertising & ADVERTISED_10baseT_Full)
-			new_adv_reg |= ADVERTISE_10FULL;
-		if (bp->advertising & ADVERTISED_100baseT_Half)
-			new_adv_reg |= ADVERTISE_100HALF;
-		if (bp->advertising & ADVERTISED_100baseT_Full)
-			new_adv_reg |= ADVERTISE_100FULL;
-		if (bp->advertising & ADVERTISED_1000baseT_Full)
-			new_adv1000_reg |= ADVERTISE_1000FULL;
+		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+		new_adv |= ADVERTISE_CSMA;
+		new_adv |= bnx2_phy_get_pause_adv(bp);
 
-		new_adv_reg |= ADVERTISE_CSMA;
+		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
 
-		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
-		if ((adv1000_reg != new_adv1000_reg) ||
-			(adv_reg != new_adv_reg) ||
+		if ((adv1000_reg != new_adv1000) ||
+			(adv_reg != new_adv) ||
 			((bmcr & BMCR_ANENABLE) == 0)) {
-			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
-			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+			bnx2_write_phy(bp, bp->mii_adv, new_adv);
+			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
 			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
 				BMCR_ANENABLE);
 		}
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
-	struct sk_buff *skb;
+	u8 *data;
 	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
 	dma_addr_t mapping;
 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
-	unsigned long align;
 
-	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
-	if (skb == NULL) {
+	data = kmalloc(bp->rx_buf_size, gfp);
+	if (!data)
 		return -ENOMEM;
-	}
 
-	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
-		skb_reserve(skb, BNX2_RX_ALIGN - align);
-
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+	mapping = dma_map_single(&bp->pdev->dev,
+				 get_l2_fhdr(data),
+				 bp->rx_buf_use_size,
 				 PCI_DMA_FROMDEVICE);
 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-		dev_kfree_skb(skb);
+		kfree(data);
 		return -EIO;
 	}
 
-	rx_buf->skb = skb;
-	rx_buf->desc = (struct l2_fhdr *) skb->data;
+	rx_buf->data = data;
 	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
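The hunk above is the heart of the bnx2 conversion: an RX ring slot now holds a bare kmalloc() buffer, and an sk_buff is materialized only after the hardware has filled the frame. A condensed sketch of that lifecycle follows; the struct and RX_HDR_OFFSET are invented for illustration, and build_skb() is shown with the single-argument form this tree uses (later kernels add a frag_size parameter):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    #define RX_HDR_OFFSET 2                  /* hypothetical headroom to skip */

    struct ring_slot {                       /* stand-in for struct sw_bd */
        u8 *data;
        dma_addr_t map;
    };

    /* refill: allocate raw storage and hand it to the NIC, no skb yet */
    static int rx_refill_one(struct device *dev, struct ring_slot *slot,
                             unsigned int buf_size)
    {
        u8 *data = kmalloc(buf_size, GFP_ATOMIC);

        if (!data)
            return -ENOMEM;
        slot->map = dma_map_single(dev, data, buf_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, slot->map)) {
            kfree(data);
            return -EIO;
        }
        slot->data = data;
        return 0;
    }

    /* completion: only now wrap the filled buffer into an skb */
    static struct sk_buff *rx_complete_one(struct device *dev,
                                           struct ring_slot *slot,
                                           unsigned int buf_size,
                                           unsigned int len)
    {
        struct sk_buff *skb;

        dma_unmap_single(dev, slot->map, buf_size, DMA_FROM_DEVICE);
        skb = build_skb(slot->data);         /* wraps the buffer, no copy */
        if (!skb) {
            kfree(slot->data);
            return NULL;
        }
        skb_reserve(skb, RX_HDR_OFFSET);
        skb_put(skb, len);
        return skb;
    }

The payoff is that no skb metadata is touched on the refill path, and a dropped frame costs only a kfree() instead of a full skb teardown.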
@@ -2965,8 +2952,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
-		  struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		   u8 *data, u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
 	struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2967,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 
 	rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
-	prod_rx_buf->skb = skb;
-	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+	prod_rx_buf->data = data;
 
 	if (cons == prod)
 		return;
@@ -2995,33 +2981,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
 	unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
 	u32 ring_idx)
 {
 	int err;
 	u16 prod = ring_idx & 0xffff;
+	struct sk_buff *skb;
 
-	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
 	if (unlikely(err)) {
-		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
 		if (hdr_len) {
 			unsigned int raw_len = len + 4;
 			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
 			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
 		}
-		return err;
+		return NULL;
 	}
 
-	skb_reserve(skb, BNX2_RX_OFFSET);
 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 			 PCI_DMA_FROMDEVICE);
-
+	skb = build_skb(data);
+	if (!skb) {
+		kfree(data);
+		goto error;
+	}
+	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
 	if (hdr_len == 0) {
 		skb_put(skb, len);
-		return 0;
+		return skb;
 	} else {
 		unsigned int i, frag_len, frag_size, pages;
 		struct sw_pg *rx_pg;
@@ -3052,7 +3044,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 				skb_frag_size_sub(frag, tail);
 				skb->data_len -= tail;
 			}
-			return 0;
+			return skb;
 		}
 		rx_pg = &rxr->rx_pg_ring[pg_cons];
@@ -3074,7 +3066,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 				rxr->rx_pg_prod = pg_prod;
 				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
 							pages - i);
-				return err;
+				return NULL;
 			}
 
 			dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3083,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
 		rxr->rx_pg_prod = pg_prod;
 		rxr->rx_pg_cons = pg_cons;
 	}
-	return 0;
+	return skb;
 }
 
 static inline u16
@@ -3130,19 +3122,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		struct sw_bd *rx_buf, *next_rx_buf;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
+		u8 *data;
 
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
 
 		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
-		skb = rx_buf->skb;
-		prefetchw(skb);
-
-		next_rx_buf =
-			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-		prefetch(next_rx_buf->desc);
+		data = rx_buf->data;
+		rx_buf->data = NULL;
 
-		rx_buf->skb = NULL;
+		rx_hdr = get_l2_fhdr(data);
+		prefetch(rx_hdr);
 
 		dma_addr = dma_unmap_addr(rx_buf, mapping);
 
@@ -3150,7 +3140,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
 			PCI_DMA_FROMDEVICE);
 
-		rx_hdr = rx_buf->desc;
+		next_rx_buf =
+			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+		prefetch(get_l2_fhdr(next_rx_buf->data));
+
 		len = rx_hdr->l2_fhdr_pkt_len;
 		status = rx_hdr->l2_fhdr_status;
 
@@ -3169,7 +3162,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			     L2_FHDR_ERRORS_TOO_SHORT |
 			     L2_FHDR_ERRORS_GIANT_FRAME))) {
 
-			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
 			if (pg_ring_used) {
 				int pages;
@@ -3184,30 +3177,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		len -= 4;
 
 		if (len <= bp->rx_copy_thresh) {
-			struct sk_buff *new_skb;
-
-			new_skb = netdev_alloc_skb(bp->dev, len + 6);
-			if (new_skb == NULL) {
-				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+			skb = netdev_alloc_skb(bp->dev, len + 6);
+			if (skb == NULL) {
+				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
 				goto next_rx;
 			}
 
 			/* aligned copy */
-			skb_copy_from_linear_data_offset(skb,
-							 BNX2_RX_OFFSET - 6,
-							 new_skb->data, len + 6);
-			skb_reserve(new_skb, 6);
-			skb_put(new_skb, len);
+			memcpy(skb->data,
+			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+			       len + 6);
+			skb_reserve(skb, 6);
+			skb_put(skb, len);
 
-			bnx2_reuse_rx_skb(bp, rxr, skb,
+			bnx2_reuse_rx_data(bp, rxr, data,
 				sw_ring_cons, sw_ring_prod);
 
-			skb = new_skb;
-		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
-			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
-			goto next_rx;
-
+		} else {
+			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+					  (sw_ring_cons << 16) | sw_ring_prod);
+			if (!skb)
+				goto next_rx;
+		}
 		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
 		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
 			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5226,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
 	ring_prod = prod = rxr->rx_prod;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
 			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
 				    ring_num, i, bp->rx_ring_size);
 			break;
@@ -5329,7 +5321,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
 
 	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
-		sizeof(struct skb_shared_info);
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
 	bp->rx_pg_ring_size = 0;
@@ -5351,8 +5343,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 	}
 
 	bp->rx_buf_use_size = rx_size;
-	/* hw alignment */
-	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+	/* hw alignment + build_skb() overhead */
+	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
 	bp->rx_ring_size = size;
 	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5418,9 +5411,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
 		for (j = 0; j < bp->rx_max_ring_idx; j++) {
 			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
-			struct sk_buff *skb = rx_buf->skb;
+			u8 *data = rx_buf->data;
 
-			if (skb == NULL)
+			if (data == NULL)
 				continue;
 
 			dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5421,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 					 bp->rx_buf_use_size,
 					 PCI_DMA_FROMDEVICE);
 
-			rx_buf->skb = NULL;
+			rx_buf->data = NULL;
 
-			dev_kfree_skb(skb);
+			kfree(data);
 		}
 		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
 			bnx2_free_rx_page(bp, rxr, j);
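The new rx_buf_size formula just above is what makes build_skb() safe here: the allocation must cover the DMA-visible area plus the aligned headroom and the skb_shared_info that build_skb() parks at the tail. A quick userspace check of the arithmetic, with typical x86-64 values standing in for the kernel's constants (all numbers illustrative, not read from this tree):

    #include <stdio.h>

    #define CACHE             64u                    /* SMP_CACHE_BYTES */
    #define SKB_DATA_ALIGN(x) (((x) + CACHE - 1) & ~(CACHE - 1))
    #define NET_SKB_PAD       64u
    #define BNX2_RX_ALIGN     16u
    #define SHINFO            320u  /* stand-in for sizeof(struct skb_shared_info) */

    int main(void)
    {
        /* mtu + ETH_HLEN + rx offset + 8, mirroring the hunk above */
        unsigned int rx_buf_use_size = 1500 + 14 + 2 + 8;
        unsigned int rx_buf_size =
            SKB_DATA_ALIGN(rx_buf_use_size + BNX2_RX_ALIGN) +
            NET_SKB_PAD + SKB_DATA_ALIGN(SHINFO);

        printf("DMA area %u, kmalloc %u (overhead %u)\n",
               rx_buf_use_size, rx_buf_size, rx_buf_size - rx_buf_use_size);
        return 0;
    }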
@@ -5736,7 +5729,8 @@ static int
 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 {
 	unsigned int pkt_size, num_pkts, i;
-	struct sk_buff *skb, *rx_skb;
+	struct sk_buff *skb;
+	u8 *data;
 	unsigned char *packet;
 	u16 rx_start_idx, rx_idx;
 	dma_addr_t map;
@@ -5828,14 +5822,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	}
 
 	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
-	rx_skb = rx_buf->skb;
+	data = rx_buf->data;
 
-	rx_hdr = rx_buf->desc;
-	skb_reserve(rx_skb, BNX2_RX_OFFSET);
+	rx_hdr = get_l2_fhdr(data);
+	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
 
 	dma_sync_single_for_cpu(&bp->pdev->dev,
 		dma_unmap_addr(rx_buf, mapping),
-		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
 	if (rx_hdr->l2_fhdr_status &
 		(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5846,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	}
 
 	for (i = 14; i < pkt_size; i++) {
-		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+		if (*(data + i) != (unsigned char) (i & 0xff)) {
 			goto loopback_test_done;
 		}
 	}
@@ -6873,10 +6867,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-	strcpy(info->bus_info, pci_name(bp->pdev));
-	strcpy(info->fw_version, bp->fw_version);
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
 }
 
 #define BNX2_REGDUMP_LEN		(32 * 1024)
@@ -7571,8 +7565,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
 	return 0;
 }
 
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
@@ -7583,7 +7577,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
 }
 
 static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2 *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7d6aaa..1db2d51ba3f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
 #define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
 #define MB_RX_CID_ADDR	MB_GET_CID_ADDR(RX_CID)
 
+/*
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
+ */
 struct sw_bd {
-	struct sk_buff		*skb;
-	struct l2_fhdr		*desc;
+	u8			*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
+/* Its faster to compute this from data than storing it in sw_bd
+ * (less cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+	return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
 struct sw_pg {
 	struct page		*page;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
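get_l2_fhdr() above recomputes the header address from the raw buffer pointer instead of caching a second pointer in sw_bd, trading one ALU operation for a smaller, cache-friendlier ring entry. A userspace model of the address math (constants are typical values, not read from this tree):

    #include <stdint.h>
    #include <stdio.h>

    #define BNX2_RX_ALIGN 16u
    #define NET_SKB_PAD   64u
    #define PTR_ALIGN(p, a) \
        ((uint8_t *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

    static uint8_t *l2_fhdr_of(uint8_t *data)
    {
        /* round up to the hw alignment, then skip the headroom */
        return PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD;
    }

    int main(void)
    {
        uint8_t buf[256];

        /* any pointer within one alignment quantum yields the same header */
        printf("%p -> %p\n", (void *)(buf + 3), (void *)l2_fhdr_of(buf + 3));
        return 0;
    }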
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212ac983..0f7b7a463eba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.70.30-0"
-#define DRV_MODULE_RELDATE      "2011/10/25"
+#define DRV_MODULE_VERSION      "1.70.35-0"
+#define DRV_MODULE_RELDATE      "2011/11/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
 #define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))
 
 /* fast path */
+/*
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
+ */
 struct sw_rx_bd {
-	struct sk_buff	*skb;
+	u8		*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -411,8 +416,7 @@ union db_prod {
 
 /* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-					 BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN			(NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -425,8 +429,8 @@ union host_hc_status_block {
 
 struct bnx2x_agg_info {
 	/*
-	 * First aggregation buffer is an skb, the following - are pages.
-	 * We will preallocate the skbs for each aggregation when
+	 * First aggregation buffer is a data buffer, the following - are pages.
+	 * We will preallocate the data buffer for each aggregation when
 	 * we open the interface and will replace the BD at the consumer
 	 * with this one when we receive the TPA_START CQE in order to
 	 * keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
 	u16			parsing_flags;
 	u16			vlan_tag;
 	u16			len_on_bd;
+	u32			rxhash;
 };
 
 #define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
 	__le16			fp_hc_idx;
 
 	u8			index;		/* number in fp array */
+	u8			rx_queue;	/* index for skb_record */
 	u8			cl_id;		/* eth client id */
 	u8			cl_qzone_id;
 	u8			fw_sb_id;	/* status block number in FW */
@@ -1141,6 +1147,7 @@ struct bnx2x_fw_stats_data {
 enum {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
+	BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
 
@@ -1186,10 +1193,20 @@ struct bnx2x {
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 
 /* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
-					 L1_CACHE_SHIFT : 8)
-	/* FW use 2 Cache lines Alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN		(2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT		min(8, L1_CACHE_SHIFT)
+
+	/* FW uses 2 Cache lines Alignment for start packet and size
+	 *
+	 * We assume skb_build() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define BNX2X_FW_RX_ALIGN_START	(1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END					\
+	max(1UL << BNX2X_RX_ALIGN_SHIFT, 			\
+	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
 
 	struct host_sp_status_block *def_status_blk;
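The split of BNX2X_FW_RX_ALIGN into START and END halves above exists because the two paddings now serve different masters: the head only needs the firmware's cache-line rounding, while the tail must also be able to hold the skb_shared_info that build_skb() appends to the buffer. Hence END is a max() of the two requirements; a compact userspace check with illustrative constants:

    #include <stdio.h>

    #define CACHE_LINE 64ul                 /* 1UL << BNX2X_RX_ALIGN_SHIFT */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define SHINFO     320ul  /* stand-in for sizeof(struct skb_shared_info) */

    int main(void)
    {
        unsigned long start = CACHE_LINE;
        unsigned long shinfo = ALIGN_UP(SHINFO, CACHE_LINE);
        /* the max(): whichever of the two tail requirements is larger */
        unsigned long end = shinfo > CACHE_LINE ? shinfo : CACHE_LINE;

        printf("head pad %lu bytes, tail pad %lu bytes\n", start, end);
        return 0;
    }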
@@ -1984,13 +2001,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
 			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 
-#define RSS_FLAGS(bp) \
-		(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
-		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
-		 (bp->multi_mode << \
-		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK			0x7f
 
@@ -2055,6 +2065,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN			128
 #define VENDOR_ID_LEN			4
 
+int bnx2x_close(struct net_device *dev);
+
 /* Congestion management fairness mode */
 #define CMNG_FNS_NONE			0
 #define CMNG_FNS_MINMAX			1
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44edb066..8336c784db49 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
  * @to:		destination FP index
  *
  * Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then mem copying the entire
+ * source onto the target
  */
 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
-	struct napi_struct orig_napi = to_fp->napi;
+
+	/* Copy the NAPI object as it has been already initialized */
+	from_fp->napi = to_fp->napi;
+
 	/* Move bnx2x_fastpath contents */
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
-
-	/* Restore the NAPI object as it has been already initialized */
-	to_fp->napi = orig_napi;
 }
 
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
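The rewritten bnx2x_move_fp() above drops the on-stack napi_struct temporary by seeding the *source* with the destination's live NAPI object before the whole-struct copy. The same trick in generic, self-contained form:

    /* preserve one live member of *to across a full struct copy */
    struct obj {
        int keep;                            /* plays the role of fp->napi */
        int payload[64];
    };

    static void move_obj(struct obj *from, struct obj *to)
    {
        from->keep = to->keep;               /* seed the source first ... */
        *to = *from;                         /* ... then copy in one shot */
    }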
@@ -292,8 +294,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 	   fp->last_max_sge, fp->rx_sge_prod);
 }
 
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+			    const struct eth_fast_path_rx_cqe *cqe)
+{
+	/* Set Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+		return le32_to_cpu(cqe->rss_hash_result);
+	return 0;
+}
+
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-			    struct sk_buff *skb, u16 cons, u16 prod,
+			    u16 cons, u16 prod,
 			    struct eth_fast_path_rx_cqe *cqe)
 {
 	struct bnx2x *bp = fp->bp;
@@ -308,9 +323,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-	/* Try to map an empty skb from the aggregation info  */
+	/* Try to map an empty data buffer from the aggregation info */
 	mapping = dma_map_single(&bp->pdev->dev,
-				 first_buf->skb->data,
+				 first_buf->data + NET_SKB_PAD,
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	/*
 	 *  ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +335,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		/* Move the BD from the consumer to the producer */
-		bnx2x_reuse_rx_skb(fp, cons, prod);
+		bnx2x_reuse_rx_data(fp, cons, prod);
 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
 		return;
 	}
 
-	/* move empty skb from pool to prod */
-	prod_rx_buf->skb = first_buf->skb;
+	/* move empty data from pool to prod */
+	prod_rx_buf->data = first_buf->data;
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-	/* point prod_bd to new skb */
+	/* point prod_bd to new data */
 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
@@ -342,6 +357,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	tpa_info->tpa_state = BNX2X_TPA_START;
 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 	tpa_info->placement_offset = cqe->placement_offset;
+	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +485,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 {
 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
-	u8 pad = tpa_info->placement_offset;
+	u32 pad = tpa_info->placement_offset;
 	u16 len = tpa_info->len_on_bd;
-	struct sk_buff *skb = rx_buf->skb;
+	struct sk_buff *skb = NULL;
+	u8 *data = rx_buf->data;
 	/* alloc new skb */
-	struct sk_buff *new_skb;
+	u8 *new_data;
 	u8 old_tpa_state = tpa_info->tpa_state;
 
 	tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +501,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	if (old_tpa_state == BNX2X_TPA_ERROR)
 		goto drop;
 
-	/* Try to allocate the new skb */
-	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+	/* Try to allocate the new data */
+	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
+	if (likely(new_data))
+		skb = build_skb(data);
 
-	if (likely(new_skb)) {
-		prefetch(skb);
-		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+	if (likely(skb)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
 		if (pad + len > fp->rx_buf_size) {
@@ -507,8 +524,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 #endif
 
-		skb_reserve(skb, pad);
+		skb_reserve(skb, pad + NET_SKB_PAD);
 		skb_put(skb, len);
+		skb->rxhash = tpa_info->rxhash;
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +542,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 
-		/* put new skb in bin */
-		rx_buf->skb = new_skb;
+		/* put new data in bin */
+		rx_buf->data = new_data;
 
 		return;
 	}
@@ -537,19 +555,6 @@ drop:
 	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-					struct sk_buff *skb)
-{
-	/* Set Toeplitz hash from CQE */
-	if ((bp->dev->features & NETIF_F_RXHASH) &&
-	    (cqe->fast_path_cqe.status_flags &
-	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-		skb->rxhash =
-			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -592,6 +597,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		u8 cqe_fp_flags;
 		enum eth_rx_cqe_type cqe_fp_type;
 		u16 len, pad;
+		u8 *data;
 
 #ifdef BNX2X_STOP_ON_ERROR
 		if (unlikely(bp->panic))
			return 0;
 #endif
@@ -602,13 +608,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
It will be needed when new skb is - allocated */ - prefetch((void *)(PAGE_ALIGN((unsigned long) - (&fp->rx_desc_ring[bd_prod])) - - PAGE_SIZE + 1)); - cqe = &fp->rx_comp_ring[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; @@ -624,125 +623,110 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { bnx2x_sp_event(fp, cqe); goto next_cqe; + } + rx_buf = &fp->rx_buf_ring[bd_cons]; + data = rx_buf->data; - /* this is an rx packet */ - } else { - rx_buf = &fp->rx_buf_ring[bd_cons]; - skb = rx_buf->skb; - prefetch(skb); - - if (!CQE_TYPE_FAST(cqe_fp_type)) { + if (!CQE_TYPE_FAST(cqe_fp_type)) { #ifdef BNX2X_STOP_ON_ERROR - /* sanity check */ - if (fp->disable_tpa && - (CQE_TYPE_START(cqe_fp_type) || - CQE_TYPE_STOP(cqe_fp_type))) - BNX2X_ERR("START/STOP packet while " - "disable_tpa type %x\n", - CQE_TYPE(cqe_fp_type)); + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); #endif - if (CQE_TYPE_START(cqe_fp_type)) { - u16 queue = cqe_fp->queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_start on queue %d\n", - queue); - - bnx2x_tpa_start(fp, queue, skb, - bd_cons, bd_prod, - cqe_fp); - - /* Set Toeplitz hash for LRO skb */ - bnx2x_set_skb_rxhash(bp, cqe, skb); + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); - goto next_rx; - - } else { - u16 queue = - cqe->end_agg_cqe.queue_index; - DP(NETIF_MSG_RX_STATUS, - "calling tpa_stop on queue %d\n", - queue); - - bnx2x_tpa_stop(bp, fp, queue, - &cqe->end_agg_cqe, - comp_ring_cons); + bnx2x_tpa_start(fp, queue, + bd_cons, bd_prod, + cqe_fp); + goto next_rx; + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); #ifdef BNX2X_STOP_ON_ERROR - if (bp->panic) - return 0; + if (bp->panic) + return 0; #endif - bnx2x_update_sge_prod(fp, cqe_fp); - goto next_cqe; - } + bnx2x_update_sge_prod(fp, cqe_fp); + goto next_cqe; } - /* non TPA */ - len = le16_to_cpu(cqe_fp->pkt_len); - pad = cqe_fp->placement_offset; - dma_sync_single_for_cpu(&bp->pdev->dev, + } + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); - prefetch(((char *)(skb)) + L1_CACHE_BYTES); + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + pad += NET_SKB_PAD; + prefetch(data + pad); /* speedup eth_type_trans() */ + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + fp->eth_q_stats.rx_err_discard_pkt++; + goto reuse_rx; + } - /* is this an error packet? 
-			/* is this an error packet? */
-			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+		/* Since we don't have a jumbo ring
+		 * copy small packets if mtu > 1500
+		 */
+		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+		    (len <= RX_COPY_THRESH)) {
+			skb = netdev_alloc_skb_ip_align(bp->dev, len);
+			if (skb == NULL) {
 				DP(NETIF_MSG_RX_ERR,
-				   "ERROR  flags %x  rx packet %u\n",
-				   cqe_fp_flags, sw_comp_cons);
-				fp->eth_q_stats.rx_err_discard_pkt++;
+				   "ERROR  packet dropped because of alloc failure\n");
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 				goto reuse_rx;
 			}
-
-			/* Since we don't have a jumbo ring
-			 * copy small packets if mtu > 1500
-			 */
-			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-			    (len <= RX_COPY_THRESH)) {
-				struct sk_buff *new_skb;
-
-				new_skb = netdev_alloc_skb(bp->dev, len + pad);
-				if (new_skb == NULL) {
-					DP(NETIF_MSG_RX_ERR,
-					   "ERROR  packet dropped "
-					   "because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
-					goto reuse_rx;
-				}
-
-				/* aligned copy */
-				skb_copy_from_linear_data_offset(skb, pad,
-						    new_skb->data + pad, len);
-				skb_reserve(new_skb, pad);
-				skb_put(new_skb, len);
-
-				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
-				skb = new_skb;
-
-			} else
-			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+			memcpy(skb->data, data + pad, len);
+			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+		} else {
+			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
-					dma_unmap_addr(rx_buf, mapping),
+						 dma_unmap_addr(rx_buf, mapping),
 						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
+				skb = build_skb(data);
+				if (unlikely(!skb)) {
+					kfree(data);
+					fp->eth_q_stats.rx_skb_alloc_failed++;
+					goto next_rx;
+				}
 				skb_reserve(skb, pad);
-				skb_put(skb, len);
-
 			} else {
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  packet dropped because "
 				   "of alloc failure\n");
 				fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
-				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 				goto next_rx;
 			}
 
+			skb_put(skb, len);
 			skb->protocol = eth_type_trans(skb, bp->dev);
 
 			/* Set Toeplitz hash for a none-LRO skb */
-			bnx2x_set_skb_rxhash(bp, cqe, skb);
+			skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
 
 			skb_checksum_none_assert(skb);
 
@@ -755,7 +739,7 @@ reuse_rx:
 			}
 		}
 
-		skb_record_rx_queue(skb, fp->index);
+		skb_record_rx_queue(skb, fp->rx_queue);
 
 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 		    PARSING_FLAGS_VLAN)
@@ -765,7 +749,7 @@ reuse_rx:
 
 next_rx:
-		rx_buf->skb = NULL;
+		rx_buf->data = NULL;
 
 		bd_cons = NEXT_RX_IDX(bd_cons);
 		bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +995,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 				struct sw_rx_bd *first_buf =
 					&tpa_info->first_buf;
 
-				first_buf->skb = netdev_alloc_skb(bp->dev,
-								  fp->rx_buf_size);
-				if (!first_buf->skb) {
+				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+							  GFP_ATOMIC);
+				if (!first_buf->data) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
 						  "disabling TPA on this "
@@ -1094,13 +1078,11 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 		for_each_cos_in_tx_queue(fp, cos) {
 			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 
-			u16 bd_cons = txdata->tx_bd_cons;
 			u16 sw_prod = txdata->tx_pkt_prod;
 			u16 sw_cons = txdata->tx_pkt_cons;
 
 			while (sw_cons != sw_prod) {
-				bd_cons = bnx2x_free_tx_pkt(bp, txdata,
-							    TX_BD(sw_cons));
+				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons));
 				sw_cons++;
 			}
 		}
@@ -1118,16 +1100,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 
 	for (i = 0; i < NUM_RX_BD; i++) {
 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-		struct sk_buff *skb = rx_buf->skb;
+		u8 *data = rx_buf->data;
 
-		if (skb == NULL)
+		if (data == NULL)
 			continue;
 		dma_unmap_single(&bp->pdev->dev,
 				 dma_unmap_addr(rx_buf, mapping),
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 
-		rx_buf->skb = NULL;
-		dev_kfree_skb(skb);
+		rx_buf->data = NULL;
+		kfree(data);
 	}
 }
 
@@ -1509,6 +1491,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
+		u32 mtu;
 
 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
 		if (IS_FCOE_IDX(i))
@@ -1518,13 +1501,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
 			 * overrun attack.
 			 */
-			fp->rx_buf_size =
-				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
 		else
-			fp->rx_buf_size =
-				bp->dev->mtu + ETH_OVREHEAD +
-				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+			mtu = bp->dev->mtu;
+		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+				  IP_HEADER_ALIGNMENT_PADDING +
+				  ETH_OVREHEAD +
+				  mtu +
+				  BNX2X_FW_RX_ALIGN_END;
+		/* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
 	}
 }
 
@@ -1929,13 +1914,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 			break;
 		}
 
-	if (!bp->port.pmf)
+	if (bp->port.pmf)
+		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+	else
 		bnx2x__link_status_update(bp);
 
 	/* start the timer */
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 #ifdef BCM_CNIC
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
 	bnx2x_setup_cnic_irq_info(bp);
 	if (bp->state == BNX2X_STATE_OPEN)
 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -3409,7 +3398,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 	return bnx2x_reload_if_running(dev);
 }
 
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+				     netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -3420,7 +3410,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 flags = bp->flags;
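The u32 -> netdev_features_t retyping above (and in bnx2.c earlier) exists because the feature mask outgrew 32 bits, so the ndo hooks now take and return the wider typedef (a u64). A minimal sketch of the retyped pair; the foo_ names and the LRO dependency are illustrative, not bnx2x's actual policy:

    #include <linux/netdevice.h>

    static netdev_features_t foo_fix_features(struct net_device *dev,
                                              netdev_features_t features)
    {
        if (!(features & NETIF_F_RXCSUM))    /* e.g. LRO requires RX csum */
            features &= ~NETIF_F_LRO;
        return features;
    }

    static int foo_set_features(struct net_device *dev,
                                netdev_features_t features)
    {
        return 0;                            /* push the new mask to hardware */
    }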
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663da180..80c5ed08e419 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -533,8 +533,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
  */
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
 #endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+				     netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
 
 /**
  * bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +875,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
-	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
 
 	/* Clear the two last indices in the page to 1:
 	   these are the indices that correspond to the "next" element,
@@ -911,26 +911,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	return 0;
 }
 
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+				      struct bnx2x_fastpath *fp, u16 index)
 {
-	struct sk_buff *skb;
+	u8 *data;
 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-	if (unlikely(skb == NULL))
+	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	if (unlikely(data == NULL))
 		return -ENOMEM;
 
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		dev_kfree_skb_any(skb);
+		kfree(data);
 		return -ENOMEM;
 	}
 
-	rx_buf->skb = skb;
+	rx_buf->data = data;
 	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +940,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	return 0;
 }
 
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
 {
 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +955,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
-	prod_rx_buf->skb = cons_rx_buf->skb;
+	prod_rx_buf->data = cons_rx_buf->data;
 	*prod_bd = *cons_bd;
 }
 
@@ -1030,9 +1031,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	for (i = 0; i < last; i++) {
 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-		struct sk_buff *skb = first_buf->skb;
+		u8 *data = first_buf->data;
 
-		if (skb == NULL) {
+		if (data == NULL) {
 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
 			continue;
 		}
@@ -1040,8 +1041,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		dev_kfree_skb(skb);
-		first_buf->skb = NULL;
+		kfree(data);
+		first_buf->data = NULL;
 	}
 }
 
@@ -1149,7 +1150,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
 	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
 			fp->eth_q_stats.rx_skb_alloc_failed++;
 			continue;
 		}
@@ -1318,6 +1319,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 	unsigned long q_type = 0;
 
+	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
 	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
 	/** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
@@ -1488,4 +1490,68 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
 	return max_cfg;
 }
 
+#ifdef BCM_CNIC
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp:		driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+#endif
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp:		driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+	int func;
+	int vn;
+
+	/* Set the attention towards other drivers on the same port */
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		if (vn == BP_VN(bp))
+			continue;
+
+		func = func_by_vn(bp, vn);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+	}
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp:		driver handle
+ * @flags:	flags to update
+ * @set:	set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+	}
+}
+
 #endif /* BNX2X_CMN_H */
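bnx2x_update_drv_flags() moves from bnx2x_dcb.c into this shared header (the dcb.c copy is deleted just below) so that nic_load and the DCBX code can both call it. Its essential shape is a read-modify-write of a word shared across PCI functions, which must hold the hardware lock for the whole sequence. The pattern, with every name a hypothetical stand-in:

    struct dev_ctx;                          /* opaque device handle */
    extern void hw_lock(struct dev_ctx *ctx);
    extern void hw_unlock(struct dev_ctx *ctx);
    extern unsigned int shmem_read(struct dev_ctx *ctx);
    extern void shmem_write(struct dev_ctx *ctx, unsigned int v);

    static void update_shared_flags(struct dev_ctx *ctx,
                                    unsigned int flags, int set)
    {
        unsigned int v;

        hw_lock(ctx);                /* serialize against other functions */
        v = shmem_read(ctx);
        v = set ? (v | flags) : (v & ~flags);
        shmem_write(ctx, v);         /* no window where another function
                                      * can interleave its own RMW */
        hw_unlock(ctx);
    }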
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd7485ab18..5051cf3deb20 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 }
 #endif
 
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
-	if (SHMEM2_HAS(bp, drv_flags)) {
-		u32 drv_flags;
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-		drv_flags = SHMEM2_RD(bp, drv_flags);
-
-		if (set)
-			SET_FLAGS(drv_flags, flags);
-		else
-			RESET_FLAGS(drv_flags, flags);
-
-		SHMEM2_WR(bp, drv_flags, drv_flags);
-		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
-		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-	}
-}
-
 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 {
 	u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 			/* mark DCBX result for PMF migration */
 			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
-			/**
+			/*
 			 * Add new app tlvs to dcbnl
 			 */
 			bnx2x_dcbnl_update_applist(bp, false);
 #endif
-			bnx2x_dcbx_stop_hw_tx(bp);
-
-			/* reconfigure the netdevice with the results of the new
+			/*
+			 * reconfigure the netdevice with the results of the new
 			 * dcbx negotiation.
 			 */
 			bnx2x_dcbx_update_tc_mapping(bp);
 
+			/*
+			 * allow other funtions to update their netdevices
+			 * accordingly
+			 */
+			if (IS_MF(bp))
+				bnx2x_link_sync_notify(bp);
+
+			bnx2x_dcbx_stop_hw_tx(bp);
+
			return;
 		}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		bnx2x_dcbx_update_ets_params(bp);
 		bnx2x_dcbx_resume_hw_tx(bp);
+
 		return;
 	case BNX2X_DCBX_STATE_TX_RELEASED:
 		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 	 *For IEEE admin_recommendation_bw_precentage
 	 *For IEEE admin_recommendation_ets_pg */
 	af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
-	for (i = 0; i < 4; i++) {
+	for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
 		if (dp->admin_priority_app_table[i].valid) {
 			struct bnx2x_admin_priority_app_table *table =
 				dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		bp->dcb_state = dcb_on;
 		bp->dcbx_enabled = dcbx_enabled;
 	} else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
 	/* if we need to syncronize DCBX result from prev PMF
-	 * read it from shmem and update bp accordingly
+	 * read it from shmem and update bp and netdev accordingly
 	 */
 	if (SHMEM2_HAS(bp, drv_flags) &&
 	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 				  bp->dcbx_error);
 		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
 					 bp->dcbx_error);
+#ifdef BCM_DCBNL
+		/*
+		 * Add new app tlvs to dcbnl
+		 */
+		bnx2x_dcbnl_update_applist(bp, false);
+		/*
+		 * Send a notification for the new negotiated parameters
+		 */
+		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+		/*
+		 * reconfigure the netdevice with the results of the new
+		 * dcbx negotiation.
+		 */
+		bnx2x_dcbx_update_tc_mapping(bp);
+
 	}
 }
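The two loops above stop hard-coding 4 and instead use DCBX_CONFIG_MAX_APP_PROTOCOL, the constant that now also sizes the array in bnx2x_dcb.h below. The general win, shown as a tiny self-contained example:

    #define MAX_APP 4                        /* single source of truth */

    struct app_entry { int valid; int priority; };
    static struct app_entry tbl[MAX_APP];

    static int count_valid(void)
    {
        int i, n = 0;

        for (i = 0; i < MAX_APP; i++)        /* cannot drift from tbl[]'s size */
            n += tbl[i].valid;
        return n;
    }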
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
 	int i, ff;
 
 	/* iterate over the app entries looking for idtype and idval */
-	for (i = 0, ff = -1; i < 4; i++) {
+	for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
 		struct bnx2x_admin_priority_app_table *app_ent =
 			&bp->dcbx_config_params.admin_priority_app_table[i];
 		if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
 		if (ff < 0 && !app_ent->valid)
 			ff = i;
 	}
-	if (i < 4)
+	if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
 		/* if found overwrite up */
 		bp->dcbx_config_params.
 			admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bca6f28..2ab9254e2d5e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
 		u32 app_id;
 };
 
+#define DCBX_CONFIG_MAX_APP_PROTOCOL	4
 struct bnx2x_config_dcbx_params {
 	u32 overwrite_settings;
 	u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
 	u32 admin_recommendation_bw_precentage[8];
 	u32 admin_recommendation_ets_pg[8];
 	u32 admin_pfc_bitmap;
-	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+	struct bnx2x_admin_priority_app_table
+		admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
 	u32 admin_default_priority;
 };
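The strcpy()->strlcpy() conversions in the ethtool.c hunk that follows (and in bnx2.c earlier) all apply one rule: ethtool_drvinfo fields are fixed-size char arrays, so every copy must be bounded by sizeof() and still NUL-terminated. strlcpy() (a kernel/BSD routine, not glibc) does both, truncating instead of overrunning. The pattern, as a hedged sketch with a hypothetical foo driver:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/string.h>

    static void foo_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
    {
        /* bounded, always NUL-terminated; silently truncates if too long */
        strlcpy(info->driver, "foo", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
                sizeof(info->bus_info));
    }

The bus_info field is the one that bites in practice: pci_name() strings grew with domains/segments, and an unbounded strcpy there can overrun the 32-byte field.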
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b27a55e..ec318711f483 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -761,8 +761,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
 	struct bnx2x *bp = netdev_priv(dev);
 	u8 phy_fw_ver[PHY_FW_VER_LEN];
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 
 	phy_fw_ver[0] = '\0';
 	if (bp->port.pmf) {
@@ -773,14 +773,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
 		bnx2x_release_phy_lock(bp);
 	}
 
-	strncpy(info->fw_version, bp->fw_ver, 32);
+	strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
 	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
 		 "bc %d.%d.%d%s%s",
 		 (bp->common.bc_ver & 0xff0000) >> 16,
 		 (bp->common.bc_ver & 0xff00) >> 8,
 		 (bp->common.bc_ver & 0xff),
 		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-	strcpy(info->bus_info, pci_name(bp->pdev));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 	info->n_stats = BNX2X_NUM_STATS;
 	info->testinfo_len = BNX2X_NUM_TESTS;
 	info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1740,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	struct sw_rx_bd *rx_buf;
 	u16 len;
 	int rc = -ENODEV;
+	u8 *data;
 
 	/* check the loopback mode */
 	switch (loopback_mode) {
@@ -1865,10 +1866,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
 	dma_sync_single_for_cpu(&bp->pdev->dev,
 				dma_unmap_addr(rx_buf, mapping),
 				fp_rx->rx_buf_size, DMA_FROM_DEVICE);
-	skb = rx_buf->skb;
-	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
 	for (i = ETH_HLEN; i < pkt_size; i++)
-		if (*(skb->data + i) != (unsigned char) (i & 0xff))
+		if (*(data + i) != (unsigned char) (i & 0xff))
 			goto test_loopback_rx_exit;
 
 	rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e949f0..0cdbb70ef83e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-	return 2 * vn + BP_PORT(bp);
-}
-
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 		   "rate shaping and fairness are disabled\n");
 }
 
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
-	int func;
-	int vn;
-
-	/* Set the attention towards other drivers on the same port */
-	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-		if (vn == BP_VN(bp))
-			continue;
-
-		func = func_by_vn(bp, vn);
-		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-	}
-}
-
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
 	if (bp->state != BNX2X_STATE_OPEN)
 		return;
 
+	/* read updated dcb configuration */
+	bnx2x_dcbx_pmf_update(bp);
+
 	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
 	if (bp->link_vars.link_up)
@@ -2808,8 +2789,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 	/* This should be a maximum number of data bytes that may be
 	 * placed on the BD (not including paddings).
	 */
-	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
-		IP_HEADER_ALIGNMENT_PADDING;
+	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+		BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 	rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -3318,6 +3299,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
 	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
 			    " the driver to shutdown the card to prevent permanent"
 			    " damage.  Please contact OEM Support for assistance\n");
+
+	/*
+	 * Scheudle device reset (unload)
+	 * This is due to some boards consuming sufficient power when driver is
+	 * up to overheat if fan fails.
+	 */
+	smp_mb__before_clear_bit();
+	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5247,7 +5239,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 	u8 cos;
 	unsigned long q_type = 0;
 	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+	fp->rx_queue = fp_idx;
 	fp->cid = fp_idx;
 	fp->cl_id = bnx2x_fp_cl_id(fp);
 	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -8522,6 +8514,17 @@ sp_rtnl_not_reset:
 	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 
+	/*
+	 * in case of fan failure we need to reset id if the "stop on error"
+	 * debug flag is set, since we trying to prevent permanent overheating
+	 * damage
+	 */
+	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+		DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+		netif_device_detach(bp->dev);
+		bnx2x_close(bp->dev);
+	}
+
 sp_rtnl_exit:
 	rtnl_unlock();
 }
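The two fan-failure hunks above form one deferral pattern: the attention-interrupt path cannot unload the driver itself, so it only sets a bit and kicks the sp_rtnl work, and the work handler, which in this driver runs under rtnl_lock, detaches the netdev and closes it. Reduced to its shape (names other than the bit operations are hypothetical; smp_mb__{before,after}_clear_bit() are the pre-3.18 spellings of the atomic-bitop barriers used here):

    /* interrupt path: flag the event, never block or unload here */
    static void note_fatal_event(struct my_priv *p)
    {
        smp_mb__before_clear_bit();
        set_bit(EVT_FAN_FAILURE, &p->sp_state);
        smp_mb__after_clear_bit();
        schedule_delayed_work(&p->sp_rtnl_task, 0);
    }

    /* process context, already under rtnl_lock in this driver's work fn */
    static void sp_rtnl_handle(struct my_priv *p)
    {
        if (test_and_clear_bit(EVT_FAN_FAILURE, &p->sp_state)) {
            netif_device_detach(p->dev);     /* stop further ndo calls */
            my_close(p->dev);                /* bnx2x exports bnx2x_close() */
        }
    }

This is also why the series adds the bnx2x_close() prototype to bnx2x.h: the work handler in bnx2x_main.c needs to call what used to be a static function.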
@@ -9268,21 +9271,38 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 }
 
 #ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
-	int func = BP_ABS_FUNC(bp);
 
 	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 				drv_lic_key[port].max_iscsi_conn);
-	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-				drv_lic_key[port].max_fcoe_conn);
 
-	/* Get the number of maximum allowed iSCSI and FCoE connections */
+	/* Get the number of maximum allowed iSCSI connections */
 	bp->cnic_eth_dev.max_iscsi_conn =
 		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
 		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn);
+
+	/*
+	 * If maximum allowed number of connections is zero -
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= NO_ISCSI_FLAG;
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_ABS_FUNC(bp);
+
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_fcoe_conn);
+
+	/* Get the number of maximum allowed FCoE connections */
 	bp->cnic_eth_dev.max_fcoe_conn =
 		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,20 +9354,26 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
 		}
 	}
 
-	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-		       bp->cnic_eth_dev.max_iscsi_conn,
-		       bp->cnic_eth_dev.max_fcoe_conn);
+	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
 	/*
 	 * If maximum allowed number of connections is zero -
 	 * disable the feature.
 	 */
-	if (!bp->cnic_eth_dev.max_iscsi_conn)
-		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
 	if (!bp->cnic_eth_dev.max_fcoe_conn)
 		bp->flags |= NO_FCOE_FLAG;
 }
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	/*
+	 * iSCSI may be dynamically disabled but reading
+	 * info here we will decrease memory usage by driver
+	 * if the feature is disabled for good
+	 */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_get_fcoe_info(bp);
+}
 #endif
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
@@ -9965,7 +9991,7 @@ static int bnx2x_open(struct net_device *dev)
 }
 
 /* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -10823,8 +10849,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
 #ifdef BCM_CNIC
-	/* disable FCOE L2 queue for E1x and E3*/
-	if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+	/* disable FCOE L2 queue for E1x */
+	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
 #endif
 
@@ -11561,7 +11587,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 	mutex_lock(&bp->cnic_mutex);
 	cp->drv_state = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
+	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 	mutex_unlock(&bp->cnic_mutex);
 	synchronize_rcu();
 	kfree(bp->cnic_kwq);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a771bf9..3034f0e31938 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1349,12 +1349,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 	enum bnx2x_stats_state state;
 	if (unlikely(bp->panic))
 		return;
-	bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 	spin_unlock_bh(&bp->stats_lock);
 
+	bnx2x_stats_stm[state][event].action(bp);
+
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
 		   state, event, bp->stats_state);
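The stats hunk above fixes an ordering bug: the action used to run against bp->stats_state *before* the transition was committed under the lock, so a concurrent event could observe and act on a stale state. The fix snapshots the state and commits the transition under stats_lock, then runs the (potentially slow) action outside the lock, keyed by the state actually observed. In miniature, as a generic sketch:

    #include <linux/spinlock.h>

    struct sm_entry {
        int next_state;
        void (*action)(void *ctx);
    };

    static void sm_handle(spinlock_t *lock, int *cur,
                          struct sm_entry (*tbl)[2], int event, void *ctx)
    {
        int state;

        spin_lock_bh(lock);
        state = *cur;                        /* snapshot under the lock */
        *cur = tbl[state][event].next_state; /* commit transition atomically */
        spin_unlock_bh(lock);

        tbl[state][event].action(ctx);       /* old state's action, lock dropped */
    }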
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c6939834..b336e55e0d80 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -506,7 +506,7 @@ int cnic_unregister_driver(int ulp_type)
 	}
 	read_unlock(&cnic_dev_lock);
 
-	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
@@ -579,7 +579,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	}
 	mutex_lock(&cnic_lock);
 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
 		cnic_put(dev);
 	} else {
 		pr_err("%s: device not registered to this ulp type %d\n",
@@ -3475,7 +3475,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
 	struct flowi6 fl6;
 
 	memset(&fl6, 0, sizeof(fl6));
-	ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+	fl6.daddr = dst_addr->sin6_addr;
 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
 		fl6.flowi6_oif = dst_addr->sin6_scope_id;
 
@@ -5134,7 +5134,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
 		}
 		cnic_shutdown_rings(dev);
 		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
-		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
 		synchronize_rcu();
 		cnic_cm_shutdown(dev);
 		cp->stop_hw(dev);
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f279fc8..aa58f9e3f913 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR	256
 #define SBMAC_MAX_RXDESCR	256
 
-#define ETHER_ADDR_LEN		6
 #define ENET_PACKET_SIZE	1518
 /*#define ENET_PACKET_SIZE	9216 */
 
@@ -266,7 +265,7 @@ struct sbmac_softc {
 	int			sbm_pause;	/* current pause setting */
 	int			sbm_link;	/* current link state */
 
-	unsigned char		sbm_hwaddr[ETHER_ADDR_LEN];
+	unsigned char		sbm_hwaddr[ETH_ALEN];
 
 	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
 	struct sbmacdma		sbm_rxdma;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6a..0acb279dcf5c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -194,7 +194,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 #if (NET_IP_ALIGN != 0)
 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
 #else
-#define TG3_RX_OFFSET(tp)	0
+#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
 #endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
@@ -1706,18 +1706,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 {
 	u8 cap = 0;
 
-	if (lcladv & ADVERTISE_1000XPAUSE) {
-		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-			else if (rmtadv & LPA_1000XPAUSE_ASYM)
-				cap = FLOW_CTRL_RX;
-		} else {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-		}
-	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+		if (lcladv & ADVERTISE_1000XPAUSE)
+			cap = FLOW_CTRL_RX;
+		if (rmtadv & ADVERTISE_1000XPAUSE)
 			cap = FLOW_CTRL_TX;
 	}
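The rewritten tg3_resolve_flowctrl_1000X() above collapses the nested ifs into the standard 802.3x resolution: symmetric pause when both ends advertise PAUSE, otherwise asymmetric pause only when both advertise ASYM, with the direction decided by which side also set PAUSE. A runnable userspace check that enumerates the whole truth table (bit values are illustrative, not the real ADVERTISE_1000X* encodings):

    #include <stdio.h>

    #define PAUSE 0x1
    #define ASYM  0x2
    #define FC_TX 0x1
    #define FC_RX 0x2

    static int resolve(int lcl, int rmt)
    {
        int cap = 0;

        if (lcl & rmt & PAUSE) {
            cap = FC_TX | FC_RX;             /* symmetric pause */
        } else if (lcl & rmt & ASYM) {
            if (lcl & PAUSE)
                cap = FC_RX;                 /* we honor their pause frames */
            if (rmt & PAUSE)
                cap = FC_TX;                 /* we may send pause frames */
        }
        return cap;
    }

    int main(void)
    {
        int l, r;

        for (l = 0; l < 4; l++)
            for (r = 0; r < 4; r++)
                printf("lcl=%d rmt=%d -> cap=%d\n", l, r, resolve(l, r));
        return 0;
    }

Inside the else-if branch at most one side can have PAUSE set (both would have taken the first branch), so the two assignments can never both fire; the plain assignments mirror the kernel code exactly.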
@@ -3790,14 +3772,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 {
 	u32 adv_reg, all_mask = 0;
 
-	if (mask & ADVERTISED_10baseT_Half)
-		all_mask |= ADVERTISE_10HALF;
-	if (mask & ADVERTISED_10baseT_Full)
-		all_mask |= ADVERTISE_10FULL;
-	if (mask & ADVERTISED_100baseT_Half)
-		all_mask |= ADVERTISE_100HALF;
-	if (mask & ADVERTISED_100baseT_Full)
-		all_mask |= ADVERTISE_100FULL;
+	all_mask = ethtool_adv_to_mii_adv_t(mask) & ADVERTISE_ALL;
 
 	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
 		return 0;
@@ -3808,11 +3783,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 		u32 tg3_ctrl;
 
-		all_mask = 0;
-		if (mask & ADVERTISED_1000baseT_Half)
-			all_mask |= ADVERTISE_1000HALF;
-		if (mask & ADVERTISED_1000baseT_Full)
-			all_mask |= ADVERTISE_1000FULL;
+		all_mask = ethtool_adv_to_mii_ctrl1000_t(mask);
 
 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 			return 0;
@@ -3961,6 +3932,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 	current_link_up = 0;
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
+	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
 
 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 		err = tg3_phy_auxctl_read(tp,
@@ -4033,8 +4005,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 	}
 
 	if (current_link_up == 1 &&
-	    tp->link_config.active_duplex == DUPLEX_FULL)
+	    tp->link_config.active_duplex == DUPLEX_FULL) {
+		u32 reg, bit;
+
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			reg = MII_TG3_FET_GEN_STAT;
+			bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+		} else {
+			reg = MII_TG3_EXT_STAT;
+			bit = MII_TG3_EXT_STAT_MDIX;
+		}
+
+		if (!tg3_readphy(tp, reg, &val) && (val & bit))
+			tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+	}
 }
 
 relink:
@@ -4903,23 +4889,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		/* do nothing, just check for link up at the end */
 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-		u32 adv, new_adv;
+		u32 adv, newadv;
 
 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
-		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
-				  ADVERTISE_1000XPAUSE |
-				  ADVERTISE_1000XPSE_ASYM |
-				  ADVERTISE_SLCT);
-
-		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+				 ADVERTISE_1000XPAUSE |
+				 ADVERTISE_1000XPSE_ASYM |
+				 ADVERTISE_SLCT);
 
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
-			new_adv |= ADVERTISE_1000XHALF;
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
-			new_adv |= ADVERTISE_1000XFULL;
+		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 
-		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
-			tg3_writephy(tp, MII_ADVERTISE, new_adv);
+		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+			tg3_writephy(tp, MII_ADVERTISE, newadv);
 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 			tg3_writephy(tp, MII_BMCR, bmcr);
 
@@ -5397,15 +5379,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	}
 }
 
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
-	if (!ri->skb)
+	if (!ri->data)
 		return;
 
 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 			 map_sz, PCI_DMA_FROMDEVICE);
-	dev_kfree_skb_any(ri->skb);
-	ri->skb = NULL;
+	kfree(ri->data);
+	ri->data = NULL;
 }
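
Note: the two tg3_setup_copper_phy hunks above first clear the new TG3_PHYFLG_MDIX_STATE flag and then, once a full-duplex link is up, latch the PHY's resolved MDI/MDI-X state into it. The same logic, consolidated into one helper for readability (a sketch using the registers and bits this patch defines in the tg3.h hunk at the end; the helper name is hypothetical):

	/* Sketch: latch the PHY's MDI-X resolution into a driver flag.
	 * FET-class PHYs report the status in MII_TG3_FET_GEN_STAT,
	 * other PHYs in the standard extended status register. */
	static void sketch_cache_mdix_state(struct tg3 *tp)
	{
		u32 reg, bit, val;

		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			reg = MII_TG3_FET_GEN_STAT;
			bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
		} else {
			reg = MII_TG3_EXT_STAT;
			bit = MII_TG3_EXT_STAT_MDIX;
		}

		/* Assume unknown until a successful read proves otherwise. */
		tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
		if (!tg3_readphy(tp, reg, &val) && (val & bit))
			tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
	}
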
 
 /* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5401,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 			    u32 opaque_key, u32 dest_idx_unmasked)
 {
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map;
-	struct sk_buff *skb;
+	u8 *data;
 	dma_addr_t mapping;
-	int skb_size, dest_idx;
+	int skb_size, data_size, dest_idx;
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
-		skb_size = tp->rx_pkt_map_sz;
+		data_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
-		skb_size = TG3_RX_JMB_MAP_SZ;
+		data_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
 	default:
@@ -5453,31 +5435,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
-	if (skb == NULL)
+	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc(skb_size, GFP_ATOMIC);
+	if (!data)
 		return -ENOMEM;
 
-	skb_reserve(skb, TG3_RX_OFFSET(tp));
-
-	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+	mapping = pci_map_single(tp->pdev,
+				 data + TG3_RX_OFFSET(tp),
+				 data_size,
 				 PCI_DMA_FROMDEVICE);
 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
-		dev_kfree_skb(skb);
+		kfree(data);
 		return -EIO;
 	}
 
-	map->skb = skb;
+	map->data = data;
 	dma_unmap_addr_set(map, mapping, mapping);
 
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
 
-	return skb_size;
+	return data_size;
 }
 
 /* We only need to move over in the address because the other
  * members of the RX descriptor are invariant.  See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
  */
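
Note: tg3_alloc_rx_data() above sizes its kmalloc() buffer so that build_skb() can later overlay struct skb_shared_info at the tail; only the headroom-plus-payload prefix is DMA-mapped, the shared-info tail stays CPU-private. The sizing arithmetic in isolation (names as in the hunk; an illustrative sketch, not the driver function):

	/* Sketch: buffer layout for a build_skb()-ready rx buffer.
	 *
	 *   +---------------+---------------------+------------------+
	 *   | TG3_RX_OFFSET | DMA area (payload)  | skb_shared_info  |
	 *   |  (headroom)   | data_size bytes     | (filled in later |
	 *   |               |                     |  by build_skb)   |
	 *   +---------------+---------------------+------------------+
	 */
	#include <linux/skbuff.h>
	#include <linux/slab.h>

	static u8 *sketch_alloc_rx_buffer(struct tg3 *tp, int data_size)
	{
		int skb_size;

		skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		/* Caller maps only data + headroom for DMA. */
		return kmalloc(skb_size, GFP_ATOMIC);
	}
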
 static void tg3_recycle_rx(struct tg3_napi *tnapi,
 			   struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5495,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 		return;
 	}
 
-	dest_map->skb = src_map->skb;
+	dest_map->data = src_map->data;
 	dma_unmap_addr_set(dest_map, mapping,
 			   dma_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5506,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 	 */
 	smp_wmb();
 
-	src_map->skb = NULL;
+	src_map->data = NULL;
 }
 
 /* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5560,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u32 opaque_key, desc_idx, *post_ptr;
+		u8 *data;
 
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &jmb_prod_idx;
 		} else
 			goto next_pkt_nopost;
@@ -5606,13 +5591,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 			goto next_pkt;
 		}
 
+		prefetch(data + TG3_RX_OFFSET(tp));
 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 		      ETH_FCS_LEN;
 
 		if (len > TG3_RX_COPY_THRESH(tp)) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
 						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
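
Note: the hunk that follows is the heart of the conversion. Frames larger than the copy threshold are wrapped in place with build_skb() (a replacement buffer has already been posted by tg3_alloc_rx_data() above), while small frames are copied into a freshly allocated skb so the large ring buffer can be recycled. The two paths, reduced to a skeleton (hypothetical helper name; error unwinding and DMA sync elided):

	/* Sketch of the two rx paths after the build_skb() conversion. */
	static struct sk_buff *sketch_rx_make_skb(struct tg3 *tp, u8 *data,
						  int len, bool large)
	{
		struct sk_buff *skb;

		if (large) {
			/* Frame stays in the ring buffer; build_skb()
			 * just wraps it, so no copy is needed. */
			skb = build_skb(data);
			if (!skb)
				return NULL;
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			/* Small frame: copy out and let the ring keep
			 * the original buffer (see tg3_recycle_rx). */
			skb = netdev_alloc_skb(tp->dev, len + TG3_RAW_IP_ALIGN);
			if (!skb)
				return NULL;
			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			memcpy(skb->data, data + TG3_RX_OFFSET(tp), len);
		}

		skb_put(skb, len);
		return skb;
	}
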
@@ -5620,35 +5606,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
-			/* Ensure that the update to the skb happens
+			skb = build_skb(data);
+			if (!skb) {
+				kfree(data);
+				goto drop_it_no_recycle;
+			}
+			skb_reserve(skb, TG3_RX_OFFSET(tp));
+			/* Ensure that the update to the data happens
 			 * after the usage of the old DMA mapping.
 			 */
 			smp_wmb();
 
-			ri->skb = NULL;
+			ri->data = NULL;
 
-			skb_put(skb, len);
 		} else {
-			struct sk_buff *copy_skb;
-
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = netdev_alloc_skb(tp->dev, len +
-						    TG3_RAW_IP_ALIGN);
-			if (copy_skb == NULL)
+			skb = netdev_alloc_skb(tp->dev,
+					       len + TG3_RAW_IP_ALIGN);
+			if (skb == NULL)
 				goto drop_it_no_recycle;
 
-			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
-			skb_put(copy_skb, len);
+			skb_reserve(skb, TG3_RAW_IP_ALIGN);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
 						    PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(skb, copy_skb->data, len);
+			memcpy(skb->data,
+			       data + TG3_RX_OFFSET(tp),
+			       len);
 			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 						       len, PCI_DMA_FROMDEVICE);
-
-			/* We'll reuse the original ring buffer. */
-			skb = copy_skb;
 		}
 
+		skb_put(skb, len);
 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5775,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		di = dpr->rx_std_prod_idx;
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_std_buffers[i].skb) {
+			if (dpr->rx_std_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -5845,7 +5833,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		di = dpr->rx_jmb_prod_idx;
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_jmb_buffers[i].skb) {
+			if (dpr->rx_jmb_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -6968,7 +6956,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
 	return 0;
 }
 
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -6994,7 +6982,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
 	}
 }
 
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -7004,9 +6993,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 		tg3_set_loopback(dev, features);
@@ -7082,14 +7071,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 	if (tpr != &tp->napi[0].prodring) {
 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 		     i = (i + 1) & tp->rx_std_ring_mask)
-			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 					tp->rx_pkt_map_sz);
 
 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
 			for (i = tpr->rx_jmb_cons_idx;
 			     i != tpr->rx_jmb_prod_idx;
 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
-				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 						TG3_RX_JMB_MAP_SZ);
 			}
 		}
@@ -7098,12 +7087,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 	}
 
 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
-		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 				tp->rx_pkt_map_sz);
 
 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
-			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 					TG3_RX_JMB_MAP_SZ);
 	}
 }
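
Note: tg3_set_loopback(), tg3_fix_features() and tg3_set_features() above are part of the tree-wide switch from u32 to the netdev_features_t type for the feature callbacks. The usual ndo_set_features pattern is to XOR the old and requested masks and react only to the bits that actually changed; a generic sketch (not tg3-specific):

	/* Sketch: a typical ndo_set_features implementation. */
	#include <linux/netdevice.h>

	static int sketch_set_features(struct net_device *dev,
				       netdev_features_t features)
	{
		netdev_features_t changed = dev->features ^ features;

		if (changed & NETIF_F_LOOPBACK) {
			/* enable/disable internal MAC loopback here */
		}
		if (changed & NETIF_F_RXCSUM) {
			/* reprogram rx checksum offload here */
		}

		return 0;
	}
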
@@ -7159,7 +7148,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX standard ring. Only "
 				    "%d out of %d buffers were allocated "
@@ -7191,7 +7180,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX jumbo ring. Only %d "
 				    "out of %d buffers were allocated "
@@ -8197,7 +8186,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 	if (!tg3_flag(tp, 5750_PLUS) ||
 	    tg3_flag(tp, 5780_CLASS) ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 57765_PLUS))
 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8207,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 		return;
 
-	if (!tg3_flag(tp, 5705_PLUS))
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
-	else
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 
 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 
@@ -8581,10 +8568,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	}
 
 	if (tg3_flag(tp, 57765_PLUS)) {
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
-			val = TG3_RX_STD_MAX_SIZE_5700;
-		else
-			val = TG3_RX_STD_MAX_SIZE_5717;
+		val = TG3_RX_STD_RING_SIZE(tp);
 		val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 		val |= (TG3_RX_STD_DMA_SZ << 2);
 	} else
@@ -10321,9 +10305,16 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (netif_running(dev)) {
 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
 		cmd->duplex = tp->link_config.active_duplex;
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+				cmd->eth_tp_mdix = ETH_TP_MDI_X;
+			else
+				cmd->eth_tp_mdix = ETH_TP_MDI;
+		}
 	} else {
 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
 		cmd->duplex = DUPLEX_INVALID;
+		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
 	}
 	cmd->phy_address = tp->phy_addr;
 	cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10419,10 @@ static void tg3_get_drvinfo(struct net_device *dev,
 			    struct ethtool_drvinfo *info)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
-	strcpy(info->driver, DRV_MODULE_NAME);
-	strcpy(info->version, DRV_MODULE_VERSION);
-	strcpy(info->fw_version, tp->fw_ver);
-	strcpy(info->bus_info, pci_name(tp->pdev));
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
 }
 
 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -11400,8 +11391,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
 	u32 budget;
-	struct sk_buff *skb, *rx_skb;
-	u8 *tx_data;
+	struct sk_buff *skb;
+	u8 *tx_data, *rx_data;
 	dma_addr_t map;
 	int num_pkts, tx_len, rx_len, i, err;
 	struct tg3_rx_buffer_desc *desc;
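
Note: the tg3_get_settings hunk above exposes the cached MDI-X state to ethtool through cmd->eth_tp_mdix, which userspace tools display as the crossover status of the copper link. The reporting contract, as a sketch (constants from linux/ethtool.h; the helper name is hypothetical):

	/* Sketch: reporting MDI/MDI-X status to ethtool.
	 *
	 *   ETH_TP_MDI_INVALID - status unknown (link down, serdes, ...)
	 *   ETH_TP_MDI         - straight-through pairs (no crossover)
	 *   ETH_TP_MDI_X       - crossover active
	 */
	static void sketch_report_mdix(struct tg3 *tp,
				       struct ethtool_cmd *cmd,
				       bool link_up)
	{
		if (!link_up || (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
		else if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
			cmd->eth_tp_mdix = ETH_TP_MDI_X;
		else
			cmd->eth_tp_mdix = ETH_TP_MDI;
	}
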
@@ -11569,11 +11560,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
 		}
 
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
-			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+			rx_data = tpr->rx_std_buffers[desc_idx].data;
 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
 					     mapping);
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
 					     mapping);
 		} else
@@ -11582,15 +11573,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
 
 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
 					    PCI_DMA_FROMDEVICE);
 
+		rx_data += TG3_RX_OFFSET(tp);
 		for (i = data_off; i < rx_len; i++, val++) {
-			if (*(rx_skb->data + i) != (u8) (val & 0xff))
+			if (*(rx_data + i) != (u8) (val & 0xff))
 				goto out;
 		}
 	}
 
 	err = 0;
 
-	/* tg3_free_rings will unmap and free the rx_skb */
+	/* tg3_free_rings will unmap and free the rx_data */
 out:
 	return err;
 }
@@ -13218,8 +13210,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
 
 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
 {
-	u32 adv = ADVERTISED_Autoneg |
-		  ADVERTISED_Pause;
+	u32 adv = ADVERTISED_Autoneg;
 
 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
 		adv |= ADVERTISED_1000baseT_Half |
@@ -14036,7 +14027,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 		tg3_flag_set(tp, 4K_FIFO_LIMIT);
 
-	if (tg3_flag(tp, 5717_PLUS))
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
 
 	if (tg3_flag(tp, 57765_PLUS) &&
@@ -14548,11 +14541,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 	else
 		tg3_flag_clear(tp, POLL_SERDES);
 
-	tp->rx_offset = NET_IP_ALIGN;
+	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 	    tg3_flag(tp, PCIX_MODE)) {
-		tp->rx_offset = 0;
+		tp->rx_offset = NET_SKB_PAD;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -15313,7 +15306,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	u32 sndmbx, rcvmbx, intmbx;
 	char str[40];
 	u64 dma_mask, persist_dma_mask;
-	u32 features = 0;
+	netdev_features_t features = 0;
 
 	printk_once(KERN_INFO "%s\n", version);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd049a33..9cc10a868dcd 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2174,6 +2174,7 @@
 #define MII_TG3_EXT_CTRL_TBI		0x8000
 
 #define MII_TG3_EXT_STAT		0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_MDIX		0x2000
 #define MII_TG3_EXT_STAT_LPASS		0x0100
 
 #define MII_TG3_RXR_COUNTERS		0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2278,9 @@
 #define MII_TG3_FET_PTEST_FRC_TX_LINK	0x1000
 #define MII_TG3_FET_PTEST_FRC_TX_LOCK	0x0800
 
+#define MII_TG3_FET_GEN_STAT		0x1c
+#define MII_TG3_FET_GEN_STAT_MDIXSTAT	0x2000
+
 #define MII_TG3_FET_TEST		0x1f
 #define  MII_TG3_FET_SHADOW_EN		0x0080
 
@@ -2662,9 +2666,13 @@ struct tg3_hw_stats {
 /* 'mapping' is superfluous as the chip does not write into
  * the tx/rx post rings so we could just fetch it from there.
  * But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses new build_skb() API :
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skb are built only after Hardware filled the frame.
  */
 struct ring_info {
-	struct sk_buff			*skb;
+	u8				*data;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -3131,6 +3139,7 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS	0x00010000
 #define TG3_PHYFLG_PARALLEL_DETECT	0x00020000
 #define TG3_PHYFLG_EEE_CAP		0x00040000
+#define TG3_PHYFLG_MDIX_STATE		0x00200000
 
 	u32				led_ctrl;
 	u32				phy_otp;
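
Note: the reworked struct ring_info in the tg3.h hunk keeps just the raw buffer pointer plus the DMA bookkeeping needed at unmap time. The store-at-map / read-at-unmap pattern around DEFINE_DMA_UNMAP_ADDR, shown generically (hypothetical names; on configurations where unmapping is a no-op the field and these accessors compile away to nothing):

	/* Sketch: DMA unmap-address bookkeeping for an rx slot. */
	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/slab.h>

	struct sketch_rx_slot {
		u8 *data;
		DEFINE_DMA_UNMAP_ADDR(mapping);
	};

	static void sketch_slot_map(struct pci_dev *pdev,
				    struct sketch_rx_slot *slot,
				    u8 *data, size_t len)
	{
		dma_addr_t mapping;

		mapping = pci_map_single(pdev, data, len, PCI_DMA_FROMDEVICE);
		slot->data = data;
		/* remember the handle for the eventual unmap */
		dma_unmap_addr_set(slot, mapping, mapping);
	}

	static void sketch_slot_unmap(struct pci_dev *pdev,
				      struct sketch_rx_slot *slot, size_t len)
	{
		pci_unmap_single(pdev, dma_unmap_addr(slot, mapping),
				 len, PCI_DMA_FROMDEVICE);
		kfree(slot->data);
		slot->data = NULL;
	}
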
